/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->bm_write is the number of the last batch successfully written.
 * conf->bm_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is bm_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment bm_flush, thus closing the current
 *   batch.
 * When we notice that bm_flush > bm_write, we write out all pending updates
 * to the bitmap, and advance bm_write to where bm_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * lose any bits.
 */
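/*
 * A minimal sketch of that sequencing (illustrative only, not part of the
 * driver): the batch counters appear in the code below as conf->seq_flush
 * and conf->seq_write, and write_out_pending_bitmap_updates() is a
 * hypothetical stand-in for the real bitmap flushing path.
 */
#if 0
static void bitmap_batching_example(raid5_conf_t *conf, struct stripe_head *sh)
{
	/* add_stripe_bio: the stripe's bitmap bits join the next open batch */
	sh->bm_seq = conf->seq_flush + 1;

	/* unplug: close the current batch so nothing more can join it */
	conf->seq_flush++;

	/* raid5d: push out every batch that is closed but not yet on disk */
	if (conf->seq_flush > conf->seq_write) {
		write_out_pending_bitmap_updates(conf);
		conf->seq_write = conf->seq_flush;
	}

	/* only a stripe whose batch has been written (bm_seq <= seq_write)
	 * may have its data written out */
}
#endif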
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/async_tx.h>
#include <linux/async.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include "md.h"
#include "raid5.h"
#include "bitmap.h"
#define NR_STRIPES		256
#define STRIPE_SIZE		PAGE_SIZE
#define STRIPE_SHIFT		(PAGE_SHIFT - 9)
#define STRIPE_SECTORS		(STRIPE_SIZE>>9)
#define	IO_THRESHOLD		1
#define BYPASS_THRESHOLD	1
#define NR_HASH			(PAGE_SIZE / sizeof(struct hlist_head))
#define HASH_MASK		(NR_HASH - 1)

#define stripe_hash(conf, sect)	(&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK]))
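/*
 * Worked example (assuming 4K pages and an 8-byte struct hlist_head):
 * NR_HASH is 4096/8 = 512 buckets and STRIPE_SHIFT is 12 - 9 = 3, so a
 * stripe starting at sector 24 hashes to bucket (24 >> 3) & 511 = 3,
 * i.e. one bucket per STRIPE_SECTORS-aligned group of sectors.
 */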
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
 * order without overlap.  There may be several bio's per stripe+device, and
 * a bio could span several devices.
 * When walking this list for a particular stripe+device, we must never proceed
 * beyond a bio that extends past this device, as the next bio might no longer
 * be valid.
 * This macro is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
#define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL)
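/*
 * Example of the cut-off above, assuming 4K pages (STRIPE_SECTORS == 8) and
 * a stripe+device covering sectors [sect, sect+8): a bio with
 * bi_sector == sect+2 and bi_size == 2048 (4 sectors) ends at sect+6, inside
 * the device, so the walk may continue via bi_next; a bio whose end reaches
 * sect+8 or beyond terminates the walk with NULL.
 */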
/*
 * The following can be used to debug the driver
 */
#define RAID5_PARANOIA	1
#if RAID5_PARANOIA && defined(CONFIG_SMP)
# define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock)
#else
# define CHECK_DEVLOCK()
#endif

#define printk_rl(args...) ((void) (printk_ratelimit() && printk(args)))
/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	/* bitwise OR: merge the new upper-16-bit count with the active count */
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}
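/*
 * Minimal usage sketch of the two packed counters (illustrative only; the
 * bio, the initial '+1' bias and the counts are invented for the example).
 */
#if 0
static void bi_segments_example(struct bio *bio)
{
	/* submission: one bias reference plus one per stripe touched */
	bio->bi_phys_segments = 1;
	bio->bi_phys_segments++;

	/* record two already-processed stripes in the upper 16 bits */
	raid5_set_bi_hw_segments(bio, 2);

	/* completion: drop active references until none remain */
	while (raid5_dec_bi_phys_segments(bio))
		;	/* other stripes still hold a reference */

	/* the processed count survives: raid5_bi_hw_segments(bio) == 2 */
}
#endif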
/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after the Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}
/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1. This helper does that mapping.
 */
static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	if (idx == sh->pd_idx)
		return syndrome_disks;
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;
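/*
 * Worked example for the md (non-ddf) layout: with 5 devices,
 * syndrome_disks = disks - 2 = 3.  Walking from raid6_d0() with
 * pd_idx == 3 and qd_idx == 4, the three data disks land in slots 0..2,
 * the parity disk maps to slot 3 (syndrome_disks) and the Q disk to
 * slot 4 (syndrome_disks + 1), matching the layout described above.
 */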
static void return_io(struct bio *return_bi)
{
	struct bio *bi = return_bi;

	return_bi = bi->bi_next;
}

static void print_raid5_conf(raid5_conf_t *conf);
static int stripe_operations_active(struct stripe_head *sh)
{
	return sh->check_state || sh->reconstruct_state ||
	       test_bit(STRIPE_BIOFILL_RUN, &sh->state) ||
	       test_bit(STRIPE_COMPUTE_RUN, &sh->state);
}
static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh)
{
	if (atomic_dec_and_test(&sh->count)) {
		BUG_ON(!list_empty(&sh->lru));
		BUG_ON(atomic_read(&conf->active_stripes)==0);
		if (test_bit(STRIPE_HANDLE, &sh->state)) {
			if (test_bit(STRIPE_DELAYED, &sh->state)) {
				list_add_tail(&sh->lru, &conf->delayed_list);
				blk_plug_device(conf->mddev->queue);
			} else if (test_bit(STRIPE_BIT_DELAY, &sh->state) &&
				   sh->bm_seq - conf->seq_write > 0) {
				list_add_tail(&sh->lru, &conf->bitmap_list);
				blk_plug_device(conf->mddev->queue);
			} else {
				clear_bit(STRIPE_BIT_DELAY, &sh->state);
				list_add_tail(&sh->lru, &conf->handle_list);
			}
			md_wakeup_thread(conf->mddev->thread);
		} else {
			BUG_ON(stripe_operations_active(sh));
			if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) {
				atomic_dec(&conf->preread_active_stripes);
				if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD)
					md_wakeup_thread(conf->mddev->thread);
			}
			atomic_dec(&conf->active_stripes);
			if (!test_bit(STRIPE_EXPANDING, &sh->state)) {
				list_add_tail(&sh->lru, &conf->inactive_list);
				wake_up(&conf->wait_for_stripe);
				if (conf->retry_read_aligned)
					md_wakeup_thread(conf->mddev->thread);
			}
		}
	}
}
static void release_stripe(struct stripe_head *sh)
{
	raid5_conf_t *conf = sh->raid_conf;
	unsigned long flags;

	spin_lock_irqsave(&conf->device_lock, flags);
	__release_stripe(conf, sh);
	spin_unlock_irqrestore(&conf->device_lock, flags);
}
static inline void remove_hash(struct stripe_head *sh)
{
	pr_debug("remove_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_del_init(&sh->hash);
}
static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh)
{
	struct hlist_head *hp = stripe_hash(conf, sh->sector);

	pr_debug("insert_hash(), stripe %llu\n",
		(unsigned long long)sh->sector);

	hlist_add_head(&sh->hash, hp);
}
/* find an idle stripe, make sure it is unhashed, and return it. */
static struct stripe_head *get_free_stripe(raid5_conf_t *conf)
{
	struct stripe_head *sh = NULL;
	struct list_head *first;

	if (list_empty(&conf->inactive_list))
		goto out;
	first = conf->inactive_list.next;
	sh = list_entry(first, struct stripe_head, lru);
	list_del_init(first);
	remove_hash(sh);
	atomic_inc(&conf->active_stripes);
out:
	return sh;
}
279 static void shrink_buffers(struct stripe_head
*sh
, int num
)
284 for (i
=0; i
<num
; i
++) {
288 sh
->dev
[i
].page
= NULL
;
293 static int grow_buffers(struct stripe_head
*sh
, int num
)
297 for (i
=0; i
<num
; i
++) {
300 if (!(page
= alloc_page(GFP_KERNEL
))) {
303 sh
->dev
[i
].page
= page
;
308 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
);
309 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
310 struct stripe_head
*sh
);
312 static void init_stripe(struct stripe_head
*sh
, sector_t sector
, int previous
)
314 raid5_conf_t
*conf
= sh
->raid_conf
;
317 BUG_ON(atomic_read(&sh
->count
) != 0);
318 BUG_ON(test_bit(STRIPE_HANDLE
, &sh
->state
));
319 BUG_ON(stripe_operations_active(sh
));
322 pr_debug("init_stripe called, stripe %llu\n",
323 (unsigned long long)sh
->sector
);
327 sh
->generation
= conf
->generation
- previous
;
328 sh
->disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
330 stripe_set_idx(sector
, conf
, previous
, sh
);
334 for (i
= sh
->disks
; i
--; ) {
335 struct r5dev
*dev
= &sh
->dev
[i
];
337 if (dev
->toread
|| dev
->read
|| dev
->towrite
|| dev
->written
||
338 test_bit(R5_LOCKED
, &dev
->flags
)) {
339 printk(KERN_ERR
"sector=%llx i=%d %p %p %p %p %d\n",
340 (unsigned long long)sh
->sector
, i
, dev
->toread
,
341 dev
->read
, dev
->towrite
, dev
->written
,
342 test_bit(R5_LOCKED
, &dev
->flags
));
346 raid5_build_block(sh
, i
, previous
);
348 insert_hash(conf
, sh
);
351 static struct stripe_head
*__find_stripe(raid5_conf_t
*conf
, sector_t sector
,
354 struct stripe_head
*sh
;
355 struct hlist_node
*hn
;
358 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector
);
359 hlist_for_each_entry(sh
, hn
, stripe_hash(conf
, sector
), hash
)
360 if (sh
->sector
== sector
&& sh
->generation
== generation
)
362 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector
);
366 static void unplug_slaves(mddev_t
*mddev
);
367 static void raid5_unplug_device(struct request_queue
*q
);
369 static struct stripe_head
*
370 get_active_stripe(raid5_conf_t
*conf
, sector_t sector
,
371 int previous
, int noblock
, int noquiesce
)
373 struct stripe_head
*sh
;
375 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector
);
377 spin_lock_irq(&conf
->device_lock
);
380 wait_event_lock_irq(conf
->wait_for_stripe
,
381 conf
->quiesce
== 0 || noquiesce
,
382 conf
->device_lock
, /* nothing */);
383 sh
= __find_stripe(conf
, sector
, conf
->generation
- previous
);
385 if (!conf
->inactive_blocked
)
386 sh
= get_free_stripe(conf
);
387 if (noblock
&& sh
== NULL
)
390 conf
->inactive_blocked
= 1;
391 wait_event_lock_irq(conf
->wait_for_stripe
,
392 !list_empty(&conf
->inactive_list
) &&
393 (atomic_read(&conf
->active_stripes
)
394 < (conf
->max_nr_stripes
*3/4)
395 || !conf
->inactive_blocked
),
397 raid5_unplug_device(conf
->mddev
->queue
)
399 conf
->inactive_blocked
= 0;
401 init_stripe(sh
, sector
, previous
);
403 if (atomic_read(&sh
->count
)) {
404 BUG_ON(!list_empty(&sh
->lru
)
405 && !test_bit(STRIPE_EXPANDING
, &sh
->state
));
407 if (!test_bit(STRIPE_HANDLE
, &sh
->state
))
408 atomic_inc(&conf
->active_stripes
);
409 if (list_empty(&sh
->lru
) &&
410 !test_bit(STRIPE_EXPANDING
, &sh
->state
))
412 list_del_init(&sh
->lru
);
415 } while (sh
== NULL
);
418 atomic_inc(&sh
->count
);
420 spin_unlock_irq(&conf
->device_lock
);
425 raid5_end_read_request(struct bio
*bi
, int error
);
427 raid5_end_write_request(struct bio
*bi
, int error
);
429 static void ops_run_io(struct stripe_head
*sh
, struct stripe_head_state
*s
)
431 raid5_conf_t
*conf
= sh
->raid_conf
;
432 int i
, disks
= sh
->disks
;
436 for (i
= disks
; i
--; ) {
440 if (test_and_clear_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
))
442 else if (test_and_clear_bit(R5_Wantread
, &sh
->dev
[i
].flags
))
447 bi
= &sh
->dev
[i
].req
;
451 bi
->bi_end_io
= raid5_end_write_request
;
453 bi
->bi_end_io
= raid5_end_read_request
;
456 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
457 if (rdev
&& test_bit(Faulty
, &rdev
->flags
))
460 atomic_inc(&rdev
->nr_pending
);
464 if (s
->syncing
|| s
->expanding
|| s
->expanded
)
465 md_sync_acct(rdev
->bdev
, STRIPE_SECTORS
);
467 set_bit(STRIPE_IO_STARTED
, &sh
->state
);
469 bi
->bi_bdev
= rdev
->bdev
;
470 pr_debug("%s: for %llu schedule op %ld on disc %d\n",
471 __func__
, (unsigned long long)sh
->sector
,
473 atomic_inc(&sh
->count
);
474 bi
->bi_sector
= sh
->sector
+ rdev
->data_offset
;
475 bi
->bi_flags
= 1 << BIO_UPTODATE
;
479 bi
->bi_io_vec
= &sh
->dev
[i
].vec
;
480 bi
->bi_io_vec
[0].bv_len
= STRIPE_SIZE
;
481 bi
->bi_io_vec
[0].bv_offset
= 0;
482 bi
->bi_size
= STRIPE_SIZE
;
485 test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
486 atomic_add(STRIPE_SECTORS
,
487 &rdev
->corrected_errors
);
488 generic_make_request(bi
);
491 set_bit(STRIPE_DEGRADED
, &sh
->state
);
492 pr_debug("skip op %ld on disc %d for sector %llu\n",
493 bi
->bi_rw
, i
, (unsigned long long)sh
->sector
);
494 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
495 set_bit(STRIPE_HANDLE
, &sh
->state
);
500 static struct dma_async_tx_descriptor
*
501 async_copy_data(int frombio
, struct bio
*bio
, struct page
*page
,
502 sector_t sector
, struct dma_async_tx_descriptor
*tx
)
505 struct page
*bio_page
;
508 struct async_submit_ctl submit
;
509 enum async_tx_flags flags
= 0;
511 if (bio
->bi_sector
>= sector
)
512 page_offset
= (signed)(bio
->bi_sector
- sector
) * 512;
514 page_offset
= (signed)(sector
- bio
->bi_sector
) * -512;
517 flags
|= ASYNC_TX_FENCE
;
518 init_async_submit(&submit
, flags
, tx
, NULL
, NULL
, NULL
);
520 bio_for_each_segment(bvl
, bio
, i
) {
521 int len
= bio_iovec_idx(bio
, i
)->bv_len
;
525 if (page_offset
< 0) {
526 b_offset
= -page_offset
;
527 page_offset
+= b_offset
;
531 if (len
> 0 && page_offset
+ len
> STRIPE_SIZE
)
532 clen
= STRIPE_SIZE
- page_offset
;
537 b_offset
+= bio_iovec_idx(bio
, i
)->bv_offset
;
538 bio_page
= bio_iovec_idx(bio
, i
)->bv_page
;
540 tx
= async_memcpy(page
, bio_page
, page_offset
,
541 b_offset
, clen
, &submit
);
543 tx
= async_memcpy(bio_page
, page
, b_offset
,
544 page_offset
, clen
, &submit
);
546 /* chain the operations */
547 submit
.depend_tx
= tx
;
549 if (clen
< len
) /* hit end of page */
557 static void ops_complete_biofill(void *stripe_head_ref
)
559 struct stripe_head
*sh
= stripe_head_ref
;
560 struct bio
*return_bi
= NULL
;
561 raid5_conf_t
*conf
= sh
->raid_conf
;
564 pr_debug("%s: stripe %llu\n", __func__
,
565 (unsigned long long)sh
->sector
);
567 /* clear completed biofills */
568 spin_lock_irq(&conf
->device_lock
);
569 for (i
= sh
->disks
; i
--; ) {
570 struct r5dev
*dev
= &sh
->dev
[i
];
572 /* acknowledge completion of a biofill operation */
573 /* and check if we need to reply to a read request,
574 * new R5_Wantfill requests are held off until
575 * !STRIPE_BIOFILL_RUN
577 if (test_and_clear_bit(R5_Wantfill
, &dev
->flags
)) {
578 struct bio
*rbi
, *rbi2
;
583 while (rbi
&& rbi
->bi_sector
<
584 dev
->sector
+ STRIPE_SECTORS
) {
585 rbi2
= r5_next_bio(rbi
, dev
->sector
);
586 if (!raid5_dec_bi_phys_segments(rbi
)) {
587 rbi
->bi_next
= return_bi
;
594 spin_unlock_irq(&conf
->device_lock
);
595 clear_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
597 return_io(return_bi
);
599 set_bit(STRIPE_HANDLE
, &sh
->state
);
603 static void ops_run_biofill(struct stripe_head
*sh
)
605 struct dma_async_tx_descriptor
*tx
= NULL
;
606 raid5_conf_t
*conf
= sh
->raid_conf
;
607 struct async_submit_ctl submit
;
610 pr_debug("%s: stripe %llu\n", __func__
,
611 (unsigned long long)sh
->sector
);
613 for (i
= sh
->disks
; i
--; ) {
614 struct r5dev
*dev
= &sh
->dev
[i
];
615 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
617 spin_lock_irq(&conf
->device_lock
);
618 dev
->read
= rbi
= dev
->toread
;
620 spin_unlock_irq(&conf
->device_lock
);
621 while (rbi
&& rbi
->bi_sector
<
622 dev
->sector
+ STRIPE_SECTORS
) {
623 tx
= async_copy_data(0, rbi
, dev
->page
,
625 rbi
= r5_next_bio(rbi
, dev
->sector
);
630 atomic_inc(&sh
->count
);
631 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_biofill
, sh
, NULL
);
632 async_trigger_callback(&submit
);
635 static void mark_target_uptodate(struct stripe_head
*sh
, int target
)
642 tgt
= &sh
->dev
[target
];
643 set_bit(R5_UPTODATE
, &tgt
->flags
);
644 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
645 clear_bit(R5_Wantcompute
, &tgt
->flags
);
648 static void ops_complete_compute(void *stripe_head_ref
)
650 struct stripe_head
*sh
= stripe_head_ref
;
652 pr_debug("%s: stripe %llu\n", __func__
,
653 (unsigned long long)sh
->sector
);
655 /* mark the computed target(s) as uptodate */
656 mark_target_uptodate(sh
, sh
->ops
.target
);
657 mark_target_uptodate(sh
, sh
->ops
.target2
);
659 clear_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
660 if (sh
->check_state
== check_state_compute_run
)
661 sh
->check_state
= check_state_compute_result
;
662 set_bit(STRIPE_HANDLE
, &sh
->state
);
666 /* return a pointer to the address conversion region of the scribble buffer */
667 static addr_conv_t
*to_addr_conv(struct stripe_head
*sh
,
668 struct raid5_percpu
*percpu
)
670 return percpu
->scribble
+ sizeof(struct page
*) * (sh
->disks
+ 2);
673 static struct dma_async_tx_descriptor
*
674 ops_run_compute5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
676 int disks
= sh
->disks
;
677 struct page
**xor_srcs
= percpu
->scribble
;
678 int target
= sh
->ops
.target
;
679 struct r5dev
*tgt
= &sh
->dev
[target
];
680 struct page
*xor_dest
= tgt
->page
;
682 struct dma_async_tx_descriptor
*tx
;
683 struct async_submit_ctl submit
;
686 pr_debug("%s: stripe %llu block: %d\n",
687 __func__
, (unsigned long long)sh
->sector
, target
);
688 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
690 for (i
= disks
; i
--; )
692 xor_srcs
[count
++] = sh
->dev
[i
].page
;
694 atomic_inc(&sh
->count
);
696 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
, NULL
,
697 ops_complete_compute
, sh
, to_addr_conv(sh
, percpu
));
698 if (unlikely(count
== 1))
699 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
701 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
/* set_syndrome_sources - populate source buffers for gen_syndrome
 * @srcs - (struct page *) array of size sh->disks
 * @sh - stripe_head to parse
 *
 * Populates srcs in proper layout order for the stripe and returns the
 * 'count' of sources to be used in a call to async_gen_syndrome. The P
 * destination buffer is recorded in srcs[count] and the Q destination
 * is recorded in srcs[count+1].
 */
static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh)
{
	int disks = sh->disks;
	int syndrome_disks = sh->ddf_layout ? disks : (disks - 2);
	int d0_idx = raid6_d0(sh);
	int count;
	int i;

	for (i = 0; i < disks; i++)
		srcs[i] = NULL;

	count = 0;
	i = d0_idx;
	do {
		int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks);

		srcs[slot] = sh->dev[i].page;
		i = raid6_next_disk(i, disks);
	} while (i != d0_idx);

	return syndrome_disks;
}
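/*
 * Example (md layout, 6 devices, so syndrome_disks == 4): after this call
 * srcs[0..3] hold the data pages in raid6_d0() walk order, srcs[4] holds
 * the P page and srcs[5] the Q page; the returned count of 4 is what the
 * callers below hand to async_gen_syndrome() as count+2 == 6 blocks.
 */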
738 static struct dma_async_tx_descriptor
*
739 ops_run_compute6_1(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
741 int disks
= sh
->disks
;
742 struct page
**blocks
= percpu
->scribble
;
744 int qd_idx
= sh
->qd_idx
;
745 struct dma_async_tx_descriptor
*tx
;
746 struct async_submit_ctl submit
;
752 if (sh
->ops
.target
< 0)
753 target
= sh
->ops
.target2
;
754 else if (sh
->ops
.target2
< 0)
755 target
= sh
->ops
.target
;
757 /* we should only have one valid target */
760 pr_debug("%s: stripe %llu block: %d\n",
761 __func__
, (unsigned long long)sh
->sector
, target
);
763 tgt
= &sh
->dev
[target
];
764 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
767 atomic_inc(&sh
->count
);
769 if (target
== qd_idx
) {
770 count
= set_syndrome_sources(blocks
, sh
);
771 blocks
[count
] = NULL
; /* regenerating p is not necessary */
772 BUG_ON(blocks
[count
+1] != dest
); /* q should already be set */
773 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
774 ops_complete_compute
, sh
,
775 to_addr_conv(sh
, percpu
));
776 tx
= async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
778 /* Compute any data- or p-drive using XOR */
780 for (i
= disks
; i
-- ; ) {
781 if (i
== target
|| i
== qd_idx
)
783 blocks
[count
++] = sh
->dev
[i
].page
;
786 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
787 NULL
, ops_complete_compute
, sh
,
788 to_addr_conv(sh
, percpu
));
789 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
, &submit
);
795 static struct dma_async_tx_descriptor
*
796 ops_run_compute6_2(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
798 int i
, count
, disks
= sh
->disks
;
799 int syndrome_disks
= sh
->ddf_layout
? disks
: disks
-2;
800 int d0_idx
= raid6_d0(sh
);
801 int faila
= -1, failb
= -1;
802 int target
= sh
->ops
.target
;
803 int target2
= sh
->ops
.target2
;
804 struct r5dev
*tgt
= &sh
->dev
[target
];
805 struct r5dev
*tgt2
= &sh
->dev
[target2
];
806 struct dma_async_tx_descriptor
*tx
;
807 struct page
**blocks
= percpu
->scribble
;
808 struct async_submit_ctl submit
;
810 pr_debug("%s: stripe %llu block1: %d block2: %d\n",
811 __func__
, (unsigned long long)sh
->sector
, target
, target2
);
812 BUG_ON(target
< 0 || target2
< 0);
813 BUG_ON(!test_bit(R5_Wantcompute
, &tgt
->flags
));
814 BUG_ON(!test_bit(R5_Wantcompute
, &tgt2
->flags
));
816 /* we need to open-code set_syndrome_sources to handle the
817 * slot number conversion for 'faila' and 'failb'
819 for (i
= 0; i
< disks
; i
++)
824 int slot
= raid6_idx_to_slot(i
, sh
, &count
, syndrome_disks
);
826 blocks
[slot
] = sh
->dev
[i
].page
;
832 i
= raid6_next_disk(i
, disks
);
833 } while (i
!= d0_idx
);
835 BUG_ON(faila
== failb
);
838 pr_debug("%s: stripe: %llu faila: %d failb: %d\n",
839 __func__
, (unsigned long long)sh
->sector
, faila
, failb
);
841 atomic_inc(&sh
->count
);
843 if (failb
== syndrome_disks
+1) {
844 /* Q disk is one of the missing disks */
845 if (faila
== syndrome_disks
) {
846 /* Missing P+Q, just recompute */
847 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
848 ops_complete_compute
, sh
,
849 to_addr_conv(sh
, percpu
));
850 return async_gen_syndrome(blocks
, 0, syndrome_disks
+2,
851 STRIPE_SIZE
, &submit
);
855 int qd_idx
= sh
->qd_idx
;
857 /* Missing D+Q: recompute D from P, then recompute Q */
858 if (target
== qd_idx
)
859 data_target
= target2
;
861 data_target
= target
;
864 for (i
= disks
; i
-- ; ) {
865 if (i
== data_target
|| i
== qd_idx
)
867 blocks
[count
++] = sh
->dev
[i
].page
;
869 dest
= sh
->dev
[data_target
].page
;
870 init_async_submit(&submit
,
871 ASYNC_TX_FENCE
|ASYNC_TX_XOR_ZERO_DST
,
873 to_addr_conv(sh
, percpu
));
874 tx
= async_xor(dest
, blocks
, 0, count
, STRIPE_SIZE
,
877 count
= set_syndrome_sources(blocks
, sh
);
878 init_async_submit(&submit
, ASYNC_TX_FENCE
, tx
,
879 ops_complete_compute
, sh
,
880 to_addr_conv(sh
, percpu
));
881 return async_gen_syndrome(blocks
, 0, count
+2,
882 STRIPE_SIZE
, &submit
);
885 init_async_submit(&submit
, ASYNC_TX_FENCE
, NULL
,
886 ops_complete_compute
, sh
,
887 to_addr_conv(sh
, percpu
));
888 if (failb
== syndrome_disks
) {
889 /* We're missing D+P. */
890 return async_raid6_datap_recov(syndrome_disks
+2,
894 /* We're missing D+D. */
895 return async_raid6_2data_recov(syndrome_disks
+2,
896 STRIPE_SIZE
, faila
, failb
,
903 static void ops_complete_prexor(void *stripe_head_ref
)
905 struct stripe_head
*sh
= stripe_head_ref
;
907 pr_debug("%s: stripe %llu\n", __func__
,
908 (unsigned long long)sh
->sector
);
911 static struct dma_async_tx_descriptor
*
912 ops_run_prexor(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
913 struct dma_async_tx_descriptor
*tx
)
915 int disks
= sh
->disks
;
916 struct page
**xor_srcs
= percpu
->scribble
;
917 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
918 struct async_submit_ctl submit
;
920 /* existing parity data subtracted */
921 struct page
*xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
923 pr_debug("%s: stripe %llu\n", __func__
,
924 (unsigned long long)sh
->sector
);
926 for (i
= disks
; i
--; ) {
927 struct r5dev
*dev
= &sh
->dev
[i
];
928 /* Only process blocks that are known to be uptodate */
929 if (test_bit(R5_Wantdrain
, &dev
->flags
))
930 xor_srcs
[count
++] = dev
->page
;
933 init_async_submit(&submit
, ASYNC_TX_FENCE
|ASYNC_TX_XOR_DROP_DST
, tx
,
934 ops_complete_prexor
, sh
, to_addr_conv(sh
, percpu
));
935 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
940 static struct dma_async_tx_descriptor
*
941 ops_run_biodrain(struct stripe_head
*sh
, struct dma_async_tx_descriptor
*tx
)
943 int disks
= sh
->disks
;
946 pr_debug("%s: stripe %llu\n", __func__
,
947 (unsigned long long)sh
->sector
);
949 for (i
= disks
; i
--; ) {
950 struct r5dev
*dev
= &sh
->dev
[i
];
953 if (test_and_clear_bit(R5_Wantdrain
, &dev
->flags
)) {
956 spin_lock(&sh
->lock
);
957 chosen
= dev
->towrite
;
959 BUG_ON(dev
->written
);
960 wbi
= dev
->written
= chosen
;
961 spin_unlock(&sh
->lock
);
963 while (wbi
&& wbi
->bi_sector
<
964 dev
->sector
+ STRIPE_SECTORS
) {
965 tx
= async_copy_data(1, wbi
, dev
->page
,
967 wbi
= r5_next_bio(wbi
, dev
->sector
);
975 static void ops_complete_reconstruct(void *stripe_head_ref
)
977 struct stripe_head
*sh
= stripe_head_ref
;
978 int disks
= sh
->disks
;
979 int pd_idx
= sh
->pd_idx
;
980 int qd_idx
= sh
->qd_idx
;
983 pr_debug("%s: stripe %llu\n", __func__
,
984 (unsigned long long)sh
->sector
);
986 for (i
= disks
; i
--; ) {
987 struct r5dev
*dev
= &sh
->dev
[i
];
989 if (dev
->written
|| i
== pd_idx
|| i
== qd_idx
)
990 set_bit(R5_UPTODATE
, &dev
->flags
);
993 if (sh
->reconstruct_state
== reconstruct_state_drain_run
)
994 sh
->reconstruct_state
= reconstruct_state_drain_result
;
995 else if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
)
996 sh
->reconstruct_state
= reconstruct_state_prexor_drain_result
;
998 BUG_ON(sh
->reconstruct_state
!= reconstruct_state_run
);
999 sh
->reconstruct_state
= reconstruct_state_result
;
1002 set_bit(STRIPE_HANDLE
, &sh
->state
);
1007 ops_run_reconstruct5(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1008 struct dma_async_tx_descriptor
*tx
)
1010 int disks
= sh
->disks
;
1011 struct page
**xor_srcs
= percpu
->scribble
;
1012 struct async_submit_ctl submit
;
1013 int count
= 0, pd_idx
= sh
->pd_idx
, i
;
1014 struct page
*xor_dest
;
1016 unsigned long flags
;
1018 pr_debug("%s: stripe %llu\n", __func__
,
1019 (unsigned long long)sh
->sector
);
1021 /* check if prexor is active which means only process blocks
1022 * that are part of a read-modify-write (written)
1024 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_run
) {
1026 xor_dest
= xor_srcs
[count
++] = sh
->dev
[pd_idx
].page
;
1027 for (i
= disks
; i
--; ) {
1028 struct r5dev
*dev
= &sh
->dev
[i
];
1030 xor_srcs
[count
++] = dev
->page
;
1033 xor_dest
= sh
->dev
[pd_idx
].page
;
1034 for (i
= disks
; i
--; ) {
1035 struct r5dev
*dev
= &sh
->dev
[i
];
1037 xor_srcs
[count
++] = dev
->page
;
1041 /* 1/ if we prexor'd then the dest is reused as a source
1042 * 2/ if we did not prexor then we are redoing the parity
1043 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST
1044 * for the synchronous xor case
1046 flags
= ASYNC_TX_ACK
|
1047 (prexor
? ASYNC_TX_XOR_DROP_DST
: ASYNC_TX_XOR_ZERO_DST
);
1049 atomic_inc(&sh
->count
);
1051 init_async_submit(&submit
, flags
, tx
, ops_complete_reconstruct
, sh
,
1052 to_addr_conv(sh
, percpu
));
1053 if (unlikely(count
== 1))
1054 tx
= async_memcpy(xor_dest
, xor_srcs
[0], 0, 0, STRIPE_SIZE
, &submit
);
1056 tx
= async_xor(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
, &submit
);
1060 ops_run_reconstruct6(struct stripe_head
*sh
, struct raid5_percpu
*percpu
,
1061 struct dma_async_tx_descriptor
*tx
)
1063 struct async_submit_ctl submit
;
1064 struct page
**blocks
= percpu
->scribble
;
1067 pr_debug("%s: stripe %llu\n", __func__
, (unsigned long long)sh
->sector
);
1069 count
= set_syndrome_sources(blocks
, sh
);
1071 atomic_inc(&sh
->count
);
1073 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_reconstruct
,
1074 sh
, to_addr_conv(sh
, percpu
));
1075 async_gen_syndrome(blocks
, 0, count
+2, STRIPE_SIZE
, &submit
);
1078 static void ops_complete_check(void *stripe_head_ref
)
1080 struct stripe_head
*sh
= stripe_head_ref
;
1082 pr_debug("%s: stripe %llu\n", __func__
,
1083 (unsigned long long)sh
->sector
);
1085 sh
->check_state
= check_state_check_result
;
1086 set_bit(STRIPE_HANDLE
, &sh
->state
);
1090 static void ops_run_check_p(struct stripe_head
*sh
, struct raid5_percpu
*percpu
)
1092 int disks
= sh
->disks
;
1093 int pd_idx
= sh
->pd_idx
;
1094 int qd_idx
= sh
->qd_idx
;
1095 struct page
*xor_dest
;
1096 struct page
**xor_srcs
= percpu
->scribble
;
1097 struct dma_async_tx_descriptor
*tx
;
1098 struct async_submit_ctl submit
;
1102 pr_debug("%s: stripe %llu\n", __func__
,
1103 (unsigned long long)sh
->sector
);
1106 xor_dest
= sh
->dev
[pd_idx
].page
;
1107 xor_srcs
[count
++] = xor_dest
;
1108 for (i
= disks
; i
--; ) {
1109 if (i
== pd_idx
|| i
== qd_idx
)
1111 xor_srcs
[count
++] = sh
->dev
[i
].page
;
1114 init_async_submit(&submit
, 0, NULL
, NULL
, NULL
,
1115 to_addr_conv(sh
, percpu
));
1116 tx
= async_xor_val(xor_dest
, xor_srcs
, 0, count
, STRIPE_SIZE
,
1117 &sh
->ops
.zero_sum_result
, &submit
);
1119 atomic_inc(&sh
->count
);
1120 init_async_submit(&submit
, ASYNC_TX_ACK
, tx
, ops_complete_check
, sh
, NULL
);
1121 tx
= async_trigger_callback(&submit
);
1124 static void ops_run_check_pq(struct stripe_head
*sh
, struct raid5_percpu
*percpu
, int checkp
)
1126 struct page
**srcs
= percpu
->scribble
;
1127 struct async_submit_ctl submit
;
1130 pr_debug("%s: stripe %llu checkp: %d\n", __func__
,
1131 (unsigned long long)sh
->sector
, checkp
);
1133 count
= set_syndrome_sources(srcs
, sh
);
1137 atomic_inc(&sh
->count
);
1138 init_async_submit(&submit
, ASYNC_TX_ACK
, NULL
, ops_complete_check
,
1139 sh
, to_addr_conv(sh
, percpu
));
1140 async_syndrome_val(srcs
, 0, count
+2, STRIPE_SIZE
,
1141 &sh
->ops
.zero_sum_result
, percpu
->spare_page
, &submit
);
1144 static void __raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1146 int overlap_clear
= 0, i
, disks
= sh
->disks
;
1147 struct dma_async_tx_descriptor
*tx
= NULL
;
1148 raid5_conf_t
*conf
= sh
->raid_conf
;
1149 int level
= conf
->level
;
1150 struct raid5_percpu
*percpu
;
1154 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1155 if (test_bit(STRIPE_OP_BIOFILL
, &ops_request
)) {
1156 ops_run_biofill(sh
);
1160 if (test_bit(STRIPE_OP_COMPUTE_BLK
, &ops_request
)) {
1162 tx
= ops_run_compute5(sh
, percpu
);
1164 if (sh
->ops
.target2
< 0 || sh
->ops
.target
< 0)
1165 tx
= ops_run_compute6_1(sh
, percpu
);
1167 tx
= ops_run_compute6_2(sh
, percpu
);
1169 /* terminate the chain if reconstruct is not set to be run */
1170 if (tx
&& !test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
))
1174 if (test_bit(STRIPE_OP_PREXOR
, &ops_request
))
1175 tx
= ops_run_prexor(sh
, percpu
, tx
);
1177 if (test_bit(STRIPE_OP_BIODRAIN
, &ops_request
)) {
1178 tx
= ops_run_biodrain(sh
, tx
);
1182 if (test_bit(STRIPE_OP_RECONSTRUCT
, &ops_request
)) {
1184 ops_run_reconstruct5(sh
, percpu
, tx
);
1186 ops_run_reconstruct6(sh
, percpu
, tx
);
1189 if (test_bit(STRIPE_OP_CHECK
, &ops_request
)) {
1190 if (sh
->check_state
== check_state_run
)
1191 ops_run_check_p(sh
, percpu
);
1192 else if (sh
->check_state
== check_state_run_q
)
1193 ops_run_check_pq(sh
, percpu
, 0);
1194 else if (sh
->check_state
== check_state_run_pq
)
1195 ops_run_check_pq(sh
, percpu
, 1);
1201 for (i
= disks
; i
--; ) {
1202 struct r5dev
*dev
= &sh
->dev
[i
];
1203 if (test_and_clear_bit(R5_Overlap
, &dev
->flags
))
1204 wake_up(&sh
->raid_conf
->wait_for_overlap
);
1209 #ifdef CONFIG_MULTICORE_RAID456
1210 static void async_run_ops(void *param
, async_cookie_t cookie
)
1212 struct stripe_head
*sh
= param
;
1213 unsigned long ops_request
= sh
->ops
.request
;
1215 clear_bit_unlock(STRIPE_OPS_REQ_PENDING
, &sh
->state
);
1216 wake_up(&sh
->ops
.wait_for_ops
);
1218 __raid_run_ops(sh
, ops_request
);
1222 static void raid_run_ops(struct stripe_head
*sh
, unsigned long ops_request
)
1224 /* since handle_stripe can be called outside of raid5d context
1225 * we need to ensure sh->ops.request is de-staged before another
1228 wait_event(sh
->ops
.wait_for_ops
,
1229 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING
, &sh
->state
));
1230 sh
->ops
.request
= ops_request
;
1232 atomic_inc(&sh
->count
);
1233 async_schedule(async_run_ops
, sh
);
1236 #define raid_run_ops __raid_run_ops
1239 static int grow_one_stripe(raid5_conf_t
*conf
)
1241 struct stripe_head
*sh
;
1242 int disks
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1243 sh
= kmem_cache_alloc(conf
->slab_cache
, GFP_KERNEL
);
1246 memset(sh
, 0, sizeof(*sh
) + (disks
-1)*sizeof(struct r5dev
));
1247 sh
->raid_conf
= conf
;
1248 spin_lock_init(&sh
->lock
);
1249 #ifdef CONFIG_MULTICORE_RAID456
1250 init_waitqueue_head(&sh
->ops
.wait_for_ops
);
1253 if (grow_buffers(sh
, disks
)) {
1254 shrink_buffers(sh
, disks
);
1255 kmem_cache_free(conf
->slab_cache
, sh
);
1258 /* we just created an active stripe so... */
1259 atomic_set(&sh
->count
, 1);
1260 atomic_inc(&conf
->active_stripes
);
1261 INIT_LIST_HEAD(&sh
->lru
);
1266 static int grow_stripes(raid5_conf_t
*conf
, int num
)
1268 struct kmem_cache
*sc
;
1269 int devs
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
1271 sprintf(conf
->cache_name
[0],
1272 "raid%d-%s", conf
->level
, mdname(conf
->mddev
));
1273 sprintf(conf
->cache_name
[1],
1274 "raid%d-%s-alt", conf
->level
, mdname(conf
->mddev
));
1275 conf
->active_name
= 0;
1276 sc
= kmem_cache_create(conf
->cache_name
[conf
->active_name
],
1277 sizeof(struct stripe_head
)+(devs
-1)*sizeof(struct r5dev
),
1281 conf
->slab_cache
= sc
;
1282 conf
->pool_size
= devs
;
1284 if (!grow_one_stripe(conf
))
/*
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}
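/*
 * Worked example: for a 6-device array on a 64-bit kernel (8-byte
 * struct page * and, for the sake of the example, an 8-byte addr_conv_t)
 * this is (6+2)*8 + (6+2)*8 = 128 bytes; to_addr_conv() above then finds
 * the address-conversion half at offset (disks+2)*sizeof(struct page *)
 * into the same buffer.
 */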
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
1336 struct stripe_head
*osh
, *nsh
;
1337 LIST_HEAD(newstripes
);
1338 struct disk_info
*ndisks
;
1341 struct kmem_cache
*sc
;
1344 if (newsize
<= conf
->pool_size
)
1345 return 0; /* never bother to shrink */
1347 err
= md_allow_write(conf
->mddev
);
1352 sc
= kmem_cache_create(conf
->cache_name
[1-conf
->active_name
],
1353 sizeof(struct stripe_head
)+(newsize
-1)*sizeof(struct r5dev
),
1358 for (i
= conf
->max_nr_stripes
; i
; i
--) {
1359 nsh
= kmem_cache_alloc(sc
, GFP_KERNEL
);
1363 memset(nsh
, 0, sizeof(*nsh
) + (newsize
-1)*sizeof(struct r5dev
));
1365 nsh
->raid_conf
= conf
;
1366 spin_lock_init(&nsh
->lock
);
1367 #ifdef CONFIG_MULTICORE_RAID456
1368 init_waitqueue_head(&nsh
->ops
.wait_for_ops
);
1371 list_add(&nsh
->lru
, &newstripes
);
1374 /* didn't get enough, give up */
1375 while (!list_empty(&newstripes
)) {
1376 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1377 list_del(&nsh
->lru
);
1378 kmem_cache_free(sc
, nsh
);
1380 kmem_cache_destroy(sc
);
1383 /* Step 2 - Must use GFP_NOIO now.
1384 * OK, we have enough stripes, start collecting inactive
1385 * stripes and copying them over
1387 list_for_each_entry(nsh
, &newstripes
, lru
) {
1388 spin_lock_irq(&conf
->device_lock
);
1389 wait_event_lock_irq(conf
->wait_for_stripe
,
1390 !list_empty(&conf
->inactive_list
),
1392 unplug_slaves(conf
->mddev
)
1394 osh
= get_free_stripe(conf
);
1395 spin_unlock_irq(&conf
->device_lock
);
1396 atomic_set(&nsh
->count
, 1);
1397 for(i
=0; i
<conf
->pool_size
; i
++)
1398 nsh
->dev
[i
].page
= osh
->dev
[i
].page
;
1399 for( ; i
<newsize
; i
++)
1400 nsh
->dev
[i
].page
= NULL
;
1401 kmem_cache_free(conf
->slab_cache
, osh
);
1403 kmem_cache_destroy(conf
->slab_cache
);
1406 * At this point, we are holding all the stripes so the array
1407 * is completely stalled, so now is a good time to resize
1408 * conf->disks and the scribble region
1410 ndisks
= kzalloc(newsize
* sizeof(struct disk_info
), GFP_NOIO
);
1412 for (i
=0; i
<conf
->raid_disks
; i
++)
1413 ndisks
[i
] = conf
->disks
[i
];
1415 conf
->disks
= ndisks
;
1420 conf
->scribble_len
= scribble_len(newsize
);
1421 for_each_present_cpu(cpu
) {
1422 struct raid5_percpu
*percpu
;
1425 percpu
= per_cpu_ptr(conf
->percpu
, cpu
);
1426 scribble
= kmalloc(conf
->scribble_len
, GFP_NOIO
);
1429 kfree(percpu
->scribble
);
1430 percpu
->scribble
= scribble
;
1438 /* Step 4, return new stripes to service */
1439 while(!list_empty(&newstripes
)) {
1440 nsh
= list_entry(newstripes
.next
, struct stripe_head
, lru
);
1441 list_del_init(&nsh
->lru
);
1443 for (i
=conf
->raid_disks
; i
< newsize
; i
++)
1444 if (nsh
->dev
[i
].page
== NULL
) {
1445 struct page
*p
= alloc_page(GFP_NOIO
);
1446 nsh
->dev
[i
].page
= p
;
1450 release_stripe(nsh
);
	/* critical section passed, GFP_NOIO no longer needed */
1454 conf
->slab_cache
= sc
;
1455 conf
->active_name
= 1-conf
->active_name
;
1456 conf
->pool_size
= newsize
;
1460 static int drop_one_stripe(raid5_conf_t
*conf
)
1462 struct stripe_head
*sh
;
1464 spin_lock_irq(&conf
->device_lock
);
1465 sh
= get_free_stripe(conf
);
1466 spin_unlock_irq(&conf
->device_lock
);
1469 BUG_ON(atomic_read(&sh
->count
));
1470 shrink_buffers(sh
, conf
->pool_size
);
1471 kmem_cache_free(conf
->slab_cache
, sh
);
1472 atomic_dec(&conf
->active_stripes
);
1476 static void shrink_stripes(raid5_conf_t
*conf
)
1478 while (drop_one_stripe(conf
))
1481 if (conf
->slab_cache
)
1482 kmem_cache_destroy(conf
->slab_cache
);
1483 conf
->slab_cache
= NULL
;
1486 static void raid5_end_read_request(struct bio
* bi
, int error
)
1488 struct stripe_head
*sh
= bi
->bi_private
;
1489 raid5_conf_t
*conf
= sh
->raid_conf
;
1490 int disks
= sh
->disks
, i
;
1491 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1492 char b
[BDEVNAME_SIZE
];
1496 for (i
=0 ; i
<disks
; i
++)
1497 if (bi
== &sh
->dev
[i
].req
)
1500 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n",
1501 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1509 set_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1510 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
1511 rdev
= conf
->disks
[i
].rdev
;
1512 printk_rl(KERN_INFO
"raid5:%s: read error corrected"
1513 " (%lu sectors at %llu on %s)\n",
1514 mdname(conf
->mddev
), STRIPE_SECTORS
,
1515 (unsigned long long)(sh
->sector
1516 + rdev
->data_offset
),
1517 bdevname(rdev
->bdev
, b
));
1518 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1519 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1521 if (atomic_read(&conf
->disks
[i
].rdev
->read_errors
))
1522 atomic_set(&conf
->disks
[i
].rdev
->read_errors
, 0);
1524 const char *bdn
= bdevname(conf
->disks
[i
].rdev
->bdev
, b
);
1526 rdev
= conf
->disks
[i
].rdev
;
1528 clear_bit(R5_UPTODATE
, &sh
->dev
[i
].flags
);
1529 atomic_inc(&rdev
->read_errors
);
1530 if (conf
->mddev
->degraded
)
1531 printk_rl(KERN_WARNING
1532 "raid5:%s: read error not correctable "
1533 "(sector %llu on %s).\n",
1534 mdname(conf
->mddev
),
1535 (unsigned long long)(sh
->sector
1536 + rdev
->data_offset
),
1538 else if (test_bit(R5_ReWrite
, &sh
->dev
[i
].flags
))
1540 printk_rl(KERN_WARNING
1541 "raid5:%s: read error NOT corrected!! "
1542 "(sector %llu on %s).\n",
1543 mdname(conf
->mddev
),
1544 (unsigned long long)(sh
->sector
1545 + rdev
->data_offset
),
1547 else if (atomic_read(&rdev
->read_errors
)
1548 > conf
->max_nr_stripes
)
1550 "raid5:%s: Too many read errors, failing device %s.\n",
1551 mdname(conf
->mddev
), bdn
);
1555 set_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1557 clear_bit(R5_ReadError
, &sh
->dev
[i
].flags
);
1558 clear_bit(R5_ReWrite
, &sh
->dev
[i
].flags
);
1559 md_error(conf
->mddev
, rdev
);
1562 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1563 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1564 set_bit(STRIPE_HANDLE
, &sh
->state
);
1568 static void raid5_end_write_request(struct bio
*bi
, int error
)
1570 struct stripe_head
*sh
= bi
->bi_private
;
1571 raid5_conf_t
*conf
= sh
->raid_conf
;
1572 int disks
= sh
->disks
, i
;
1573 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
1575 for (i
=0 ; i
<disks
; i
++)
1576 if (bi
== &sh
->dev
[i
].req
)
1579 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n",
1580 (unsigned long long)sh
->sector
, i
, atomic_read(&sh
->count
),
1588 md_error(conf
->mddev
, conf
->disks
[i
].rdev
);
1590 rdev_dec_pending(conf
->disks
[i
].rdev
, conf
->mddev
);
1592 clear_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
1593 set_bit(STRIPE_HANDLE
, &sh
->state
);
1598 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
);
1600 static void raid5_build_block(struct stripe_head
*sh
, int i
, int previous
)
1602 struct r5dev
*dev
= &sh
->dev
[i
];
1604 bio_init(&dev
->req
);
1605 dev
->req
.bi_io_vec
= &dev
->vec
;
1607 dev
->req
.bi_max_vecs
++;
1608 dev
->vec
.bv_page
= dev
->page
;
1609 dev
->vec
.bv_len
= STRIPE_SIZE
;
1610 dev
->vec
.bv_offset
= 0;
1612 dev
->req
.bi_sector
= sh
->sector
;
1613 dev
->req
.bi_private
= sh
;
1616 dev
->sector
= compute_blocknr(sh
, i
, previous
);
1619 static void error(mddev_t
*mddev
, mdk_rdev_t
*rdev
)
1621 char b
[BDEVNAME_SIZE
];
1622 raid5_conf_t
*conf
= mddev
->private;
1623 pr_debug("raid5: error called\n");
1625 if (!test_bit(Faulty
, &rdev
->flags
)) {
1626 set_bit(MD_CHANGE_DEVS
, &mddev
->flags
);
1627 if (test_and_clear_bit(In_sync
, &rdev
->flags
)) {
1628 unsigned long flags
;
1629 spin_lock_irqsave(&conf
->device_lock
, flags
);
1631 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
1633 * if recovery was running, make sure it aborts.
1635 set_bit(MD_RECOVERY_INTR
, &mddev
->recovery
);
1637 set_bit(Faulty
, &rdev
->flags
);
1639 "raid5: Disk failure on %s, disabling device.\n"
1640 "raid5: Operation continuing on %d devices.\n",
1641 bdevname(rdev
->bdev
,b
), conf
->raid_disks
- mddev
->degraded
);
1646 * Input: a 'big' sector number,
1647 * Output: index of the data and parity disk, and the sector # in them.
1649 static sector_t
raid5_compute_sector(raid5_conf_t
*conf
, sector_t r_sector
,
1650 int previous
, int *dd_idx
,
1651 struct stripe_head
*sh
)
1654 unsigned long chunk_number
;
1655 unsigned int chunk_offset
;
1658 sector_t new_sector
;
1659 int algorithm
= previous
? conf
->prev_algo
1661 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1662 : conf
->chunk_sectors
;
1663 int raid_disks
= previous
? conf
->previous_raid_disks
1665 int data_disks
= raid_disks
- conf
->max_degraded
;
	/* First compute the information on this sector */

	/*
	 * Compute the chunk number and the sector offset inside the chunk
	 */
	chunk_offset = sector_div(r_sector, sectors_per_chunk);
	chunk_number = r_sector;
	BUG_ON(r_sector != chunk_number);

	/*
	 * Compute the stripe number
	 */
	stripe = chunk_number / data_disks;

	/*
	 * Compute the data disk and parity disk indexes inside the stripe
	 */
	*dd_idx = chunk_number % data_disks;

	/*
	 * Select the parity disk based on the user selected algorithm.
	 */
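	/*
	 * Worked example (hypothetical array): raid5 over 4 disks with 64KiB
	 * chunks, so sectors_per_chunk = 128 and data_disks = 3.  For
	 * r_sector = 1000: chunk_offset = 1000 % 128 = 104, chunk_number = 7,
	 * stripe = 7 / 3 = 2 and *dd_idx = 7 % 3 = 1.  The switch below then
	 * picks pd_idx (and qd_idx for raid6) from the layout algorithm, and
	 * new_sector becomes 2 * 128 + 104 = 360 on the chosen data disk.
	 */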
1689 pd_idx
= qd_idx
= ~0;
1690 switch(conf
->level
) {
1692 pd_idx
= data_disks
;
1695 switch (algorithm
) {
1696 case ALGORITHM_LEFT_ASYMMETRIC
:
1697 pd_idx
= data_disks
- stripe
% raid_disks
;
1698 if (*dd_idx
>= pd_idx
)
1701 case ALGORITHM_RIGHT_ASYMMETRIC
:
1702 pd_idx
= stripe
% raid_disks
;
1703 if (*dd_idx
>= pd_idx
)
1706 case ALGORITHM_LEFT_SYMMETRIC
:
1707 pd_idx
= data_disks
- stripe
% raid_disks
;
1708 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1710 case ALGORITHM_RIGHT_SYMMETRIC
:
1711 pd_idx
= stripe
% raid_disks
;
1712 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1714 case ALGORITHM_PARITY_0
:
1718 case ALGORITHM_PARITY_N
:
1719 pd_idx
= data_disks
;
1722 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1729 switch (algorithm
) {
1730 case ALGORITHM_LEFT_ASYMMETRIC
:
1731 pd_idx
= raid_disks
- 1 - (stripe
% raid_disks
);
1732 qd_idx
= pd_idx
+ 1;
1733 if (pd_idx
== raid_disks
-1) {
1734 (*dd_idx
)++; /* Q D D D P */
1736 } else if (*dd_idx
>= pd_idx
)
1737 (*dd_idx
) += 2; /* D D P Q D */
1739 case ALGORITHM_RIGHT_ASYMMETRIC
:
1740 pd_idx
= stripe
% raid_disks
;
1741 qd_idx
= pd_idx
+ 1;
1742 if (pd_idx
== raid_disks
-1) {
1743 (*dd_idx
)++; /* Q D D D P */
1745 } else if (*dd_idx
>= pd_idx
)
1746 (*dd_idx
) += 2; /* D D P Q D */
1748 case ALGORITHM_LEFT_SYMMETRIC
:
1749 pd_idx
= raid_disks
- 1 - (stripe
% raid_disks
);
1750 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1751 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1753 case ALGORITHM_RIGHT_SYMMETRIC
:
1754 pd_idx
= stripe
% raid_disks
;
1755 qd_idx
= (pd_idx
+ 1) % raid_disks
;
1756 *dd_idx
= (pd_idx
+ 2 + *dd_idx
) % raid_disks
;
1759 case ALGORITHM_PARITY_0
:
1764 case ALGORITHM_PARITY_N
:
1765 pd_idx
= data_disks
;
1766 qd_idx
= data_disks
+ 1;
1769 case ALGORITHM_ROTATING_ZERO_RESTART
:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the
			 * order of blocks for computing Q is different.
			 */
1773 pd_idx
= stripe
% raid_disks
;
1774 qd_idx
= pd_idx
+ 1;
1775 if (pd_idx
== raid_disks
-1) {
1776 (*dd_idx
)++; /* Q D D D P */
1778 } else if (*dd_idx
>= pd_idx
)
1779 (*dd_idx
) += 2; /* D D P Q D */
1783 case ALGORITHM_ROTATING_N_RESTART
:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
1788 pd_idx
= raid_disks
- 1 - ((stripe
+ 1) % raid_disks
);
1789 qd_idx
= pd_idx
+ 1;
1790 if (pd_idx
== raid_disks
-1) {
1791 (*dd_idx
)++; /* Q D D D P */
1793 } else if (*dd_idx
>= pd_idx
)
1794 (*dd_idx
) += 2; /* D D P Q D */
1798 case ALGORITHM_ROTATING_N_CONTINUE
:
1799 /* Same as left_symmetric but Q is before P */
1800 pd_idx
= raid_disks
- 1 - (stripe
% raid_disks
);
1801 qd_idx
= (pd_idx
+ raid_disks
- 1) % raid_disks
;
1802 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % raid_disks
;
1806 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1807 /* RAID5 left_asymmetric, with Q on last device */
1808 pd_idx
= data_disks
- stripe
% (raid_disks
-1);
1809 if (*dd_idx
>= pd_idx
)
1811 qd_idx
= raid_disks
- 1;
1814 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1815 pd_idx
= stripe
% (raid_disks
-1);
1816 if (*dd_idx
>= pd_idx
)
1818 qd_idx
= raid_disks
- 1;
1821 case ALGORITHM_LEFT_SYMMETRIC_6
:
1822 pd_idx
= data_disks
- stripe
% (raid_disks
-1);
1823 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1824 qd_idx
= raid_disks
- 1;
1827 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1828 pd_idx
= stripe
% (raid_disks
-1);
1829 *dd_idx
= (pd_idx
+ 1 + *dd_idx
) % (raid_disks
-1);
1830 qd_idx
= raid_disks
- 1;
1833 case ALGORITHM_PARITY_0_6
:
1836 qd_idx
= raid_disks
- 1;
1841 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1849 sh
->pd_idx
= pd_idx
;
1850 sh
->qd_idx
= qd_idx
;
1851 sh
->ddf_layout
= ddf_layout
;
1854 * Finally, compute the new sector number
1856 new_sector
= (sector_t
)stripe
* sectors_per_chunk
+ chunk_offset
;
1861 static sector_t
compute_blocknr(struct stripe_head
*sh
, int i
, int previous
)
1863 raid5_conf_t
*conf
= sh
->raid_conf
;
1864 int raid_disks
= sh
->disks
;
1865 int data_disks
= raid_disks
- conf
->max_degraded
;
1866 sector_t new_sector
= sh
->sector
, check
;
1867 int sectors_per_chunk
= previous
? conf
->prev_chunk_sectors
1868 : conf
->chunk_sectors
;
1869 int algorithm
= previous
? conf
->prev_algo
1873 int chunk_number
, dummy1
, dd_idx
= i
;
1875 struct stripe_head sh2
;
1878 chunk_offset
= sector_div(new_sector
, sectors_per_chunk
);
1879 stripe
= new_sector
;
1880 BUG_ON(new_sector
!= stripe
);
1882 if (i
== sh
->pd_idx
)
1884 switch(conf
->level
) {
1887 switch (algorithm
) {
1888 case ALGORITHM_LEFT_ASYMMETRIC
:
1889 case ALGORITHM_RIGHT_ASYMMETRIC
:
1893 case ALGORITHM_LEFT_SYMMETRIC
:
1894 case ALGORITHM_RIGHT_SYMMETRIC
:
1897 i
-= (sh
->pd_idx
+ 1);
1899 case ALGORITHM_PARITY_0
:
1902 case ALGORITHM_PARITY_N
:
1905 printk(KERN_ERR
"raid5: unsupported algorithm %d\n",
1911 if (i
== sh
->qd_idx
)
1912 return 0; /* It is the Q disk */
1913 switch (algorithm
) {
1914 case ALGORITHM_LEFT_ASYMMETRIC
:
1915 case ALGORITHM_RIGHT_ASYMMETRIC
:
1916 case ALGORITHM_ROTATING_ZERO_RESTART
:
1917 case ALGORITHM_ROTATING_N_RESTART
:
1918 if (sh
->pd_idx
== raid_disks
-1)
1919 i
--; /* Q D D D P */
1920 else if (i
> sh
->pd_idx
)
1921 i
-= 2; /* D D P Q D */
1923 case ALGORITHM_LEFT_SYMMETRIC
:
1924 case ALGORITHM_RIGHT_SYMMETRIC
:
1925 if (sh
->pd_idx
== raid_disks
-1)
1926 i
--; /* Q D D D P */
1931 i
-= (sh
->pd_idx
+ 2);
1934 case ALGORITHM_PARITY_0
:
1937 case ALGORITHM_PARITY_N
:
1939 case ALGORITHM_ROTATING_N_CONTINUE
:
1940 /* Like left_symmetric, but P is before Q */
1941 if (sh
->pd_idx
== 0)
1942 i
--; /* P D D D Q */
1947 i
-= (sh
->pd_idx
+ 1);
1950 case ALGORITHM_LEFT_ASYMMETRIC_6
:
1951 case ALGORITHM_RIGHT_ASYMMETRIC_6
:
1955 case ALGORITHM_LEFT_SYMMETRIC_6
:
1956 case ALGORITHM_RIGHT_SYMMETRIC_6
:
1958 i
+= data_disks
+ 1;
1959 i
-= (sh
->pd_idx
+ 1);
1961 case ALGORITHM_PARITY_0_6
:
1965 printk(KERN_CRIT
"raid6: unsupported algorithm %d\n",
1972 chunk_number
= stripe
* data_disks
+ i
;
1973 r_sector
= (sector_t
)chunk_number
* sectors_per_chunk
+ chunk_offset
;
1975 check
= raid5_compute_sector(conf
, r_sector
,
1976 previous
, &dummy1
, &sh2
);
1977 if (check
!= sh
->sector
|| dummy1
!= dd_idx
|| sh2
.pd_idx
!= sh
->pd_idx
1978 || sh2
.qd_idx
!= sh
->qd_idx
) {
1979 printk(KERN_ERR
"compute_blocknr: map not correct\n");
1987 schedule_reconstruction(struct stripe_head
*sh
, struct stripe_head_state
*s
,
1988 int rcw
, int expand
)
1990 int i
, pd_idx
= sh
->pd_idx
, disks
= sh
->disks
;
1991 raid5_conf_t
*conf
= sh
->raid_conf
;
1992 int level
= conf
->level
;
1995 /* if we are not expanding this is a proper write request, and
1996 * there will be bios with new data to be drained into the
2000 sh
->reconstruct_state
= reconstruct_state_drain_run
;
2001 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2003 sh
->reconstruct_state
= reconstruct_state_run
;
2005 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2007 for (i
= disks
; i
--; ) {
2008 struct r5dev
*dev
= &sh
->dev
[i
];
2011 set_bit(R5_LOCKED
, &dev
->flags
);
2012 set_bit(R5_Wantdrain
, &dev
->flags
);
2014 clear_bit(R5_UPTODATE
, &dev
->flags
);
2018 if (s
->locked
+ conf
->max_degraded
== disks
)
2019 if (!test_and_set_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2020 atomic_inc(&conf
->pending_full_writes
);
2023 BUG_ON(!(test_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
) ||
2024 test_bit(R5_Wantcompute
, &sh
->dev
[pd_idx
].flags
)));
2026 sh
->reconstruct_state
= reconstruct_state_prexor_drain_run
;
2027 set_bit(STRIPE_OP_PREXOR
, &s
->ops_request
);
2028 set_bit(STRIPE_OP_BIODRAIN
, &s
->ops_request
);
2029 set_bit(STRIPE_OP_RECONSTRUCT
, &s
->ops_request
);
2031 for (i
= disks
; i
--; ) {
2032 struct r5dev
*dev
= &sh
->dev
[i
];
2037 (test_bit(R5_UPTODATE
, &dev
->flags
) ||
2038 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2039 set_bit(R5_Wantdrain
, &dev
->flags
);
2040 set_bit(R5_LOCKED
, &dev
->flags
);
2041 clear_bit(R5_UPTODATE
, &dev
->flags
);
2047 /* keep the parity disk(s) locked while asynchronous operations
2050 set_bit(R5_LOCKED
, &sh
->dev
[pd_idx
].flags
);
2051 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2055 int qd_idx
= sh
->qd_idx
;
2056 struct r5dev
*dev
= &sh
->dev
[qd_idx
];
2058 set_bit(R5_LOCKED
, &dev
->flags
);
2059 clear_bit(R5_UPTODATE
, &dev
->flags
);
2063 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n",
2064 __func__
, (unsigned long long)sh
->sector
,
2065 s
->locked
, s
->ops_request
);
2069 * Each stripe/dev can have one or more bion attached.
2070 * toread/towrite point to the first in a chain.
2071 * The bi_next chain must be in order.
2073 static int add_stripe_bio(struct stripe_head
*sh
, struct bio
*bi
, int dd_idx
, int forwrite
)
2076 raid5_conf_t
*conf
= sh
->raid_conf
;
2079 pr_debug("adding bh b#%llu to stripe s#%llu\n",
2080 (unsigned long long)bi
->bi_sector
,
2081 (unsigned long long)sh
->sector
);
2084 spin_lock(&sh
->lock
);
2085 spin_lock_irq(&conf
->device_lock
);
2087 bip
= &sh
->dev
[dd_idx
].towrite
;
2088 if (*bip
== NULL
&& sh
->dev
[dd_idx
].written
== NULL
)
2091 bip
= &sh
->dev
[dd_idx
].toread
;
2092 while (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
) {
2093 if ((*bip
)->bi_sector
+ ((*bip
)->bi_size
>> 9) > bi
->bi_sector
)
2095 bip
= & (*bip
)->bi_next
;
2097 if (*bip
&& (*bip
)->bi_sector
< bi
->bi_sector
+ ((bi
->bi_size
)>>9))
2100 BUG_ON(*bip
&& bi
->bi_next
&& (*bip
) != bi
->bi_next
);
2104 bi
->bi_phys_segments
++;
2105 spin_unlock_irq(&conf
->device_lock
);
2106 spin_unlock(&sh
->lock
);
2108 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2109 (unsigned long long)bi
->bi_sector
,
2110 (unsigned long long)sh
->sector
, dd_idx
);
2112 if (conf
->mddev
->bitmap
&& firstwrite
) {
2113 bitmap_startwrite(conf
->mddev
->bitmap
, sh
->sector
,
2115 sh
->bm_seq
= conf
->seq_flush
+1;
2116 set_bit(STRIPE_BIT_DELAY
, &sh
->state
);
2120 /* check if page is covered */
2121 sector_t sector
= sh
->dev
[dd_idx
].sector
;
2122 for (bi
=sh
->dev
[dd_idx
].towrite
;
2123 sector
< sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
&&
2124 bi
&& bi
->bi_sector
<= sector
;
2125 bi
= r5_next_bio(bi
, sh
->dev
[dd_idx
].sector
)) {
2126 if (bi
->bi_sector
+ (bi
->bi_size
>>9) >= sector
)
2127 sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
2129 if (sector
>= sh
->dev
[dd_idx
].sector
+ STRIPE_SECTORS
)
2130 set_bit(R5_OVERWRITE
, &sh
->dev
[dd_idx
].flags
);
2135 set_bit(R5_Overlap
, &sh
->dev
[dd_idx
].flags
);
2136 spin_unlock_irq(&conf
->device_lock
);
2137 spin_unlock(&sh
->lock
);
2141 static void end_reshape(raid5_conf_t
*conf
);
2143 static void stripe_set_idx(sector_t stripe
, raid5_conf_t
*conf
, int previous
,
2144 struct stripe_head
*sh
)
2146 int sectors_per_chunk
=
2147 previous
? conf
->prev_chunk_sectors
: conf
->chunk_sectors
;
2149 int chunk_offset
= sector_div(stripe
, sectors_per_chunk
);
2150 int disks
= previous
? conf
->previous_raid_disks
: conf
->raid_disks
;
2152 raid5_compute_sector(conf
,
2153 stripe
* (disks
- conf
->max_degraded
)
2154 *sectors_per_chunk
+ chunk_offset
,
2160 handle_failed_stripe(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2161 struct stripe_head_state
*s
, int disks
,
2162 struct bio
**return_bi
)
2165 for (i
= disks
; i
--; ) {
2169 if (test_bit(R5_ReadError
, &sh
->dev
[i
].flags
)) {
2172 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
2173 if (rdev
&& test_bit(In_sync
, &rdev
->flags
))
2174 /* multiple read failures in one stripe */
2175 md_error(conf
->mddev
, rdev
);
2178 spin_lock_irq(&conf
->device_lock
);
2179 /* fail all writes first */
2180 bi
= sh
->dev
[i
].towrite
;
2181 sh
->dev
[i
].towrite
= NULL
;
2187 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2188 wake_up(&conf
->wait_for_overlap
);
2190 while (bi
&& bi
->bi_sector
<
2191 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2192 struct bio
*nextbi
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2193 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2194 if (!raid5_dec_bi_phys_segments(bi
)) {
2195 md_write_end(conf
->mddev
);
2196 bi
->bi_next
= *return_bi
;
2201 /* and fail all 'written' */
2202 bi
= sh
->dev
[i
].written
;
2203 sh
->dev
[i
].written
= NULL
;
2204 if (bi
) bitmap_end
= 1;
2205 while (bi
&& bi
->bi_sector
<
2206 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2207 struct bio
*bi2
= r5_next_bio(bi
, sh
->dev
[i
].sector
);
2208 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2209 if (!raid5_dec_bi_phys_segments(bi
)) {
2210 md_write_end(conf
->mddev
);
2211 bi
->bi_next
= *return_bi
;
2217 /* fail any reads if this device is non-operational and
2218 * the data has not reached the cache yet.
2220 if (!test_bit(R5_Wantfill
, &sh
->dev
[i
].flags
) &&
2221 (!test_bit(R5_Insync
, &sh
->dev
[i
].flags
) ||
2222 test_bit(R5_ReadError
, &sh
->dev
[i
].flags
))) {
2223 bi
= sh
->dev
[i
].toread
;
2224 sh
->dev
[i
].toread
= NULL
;
2225 if (test_and_clear_bit(R5_Overlap
, &sh
->dev
[i
].flags
))
2226 wake_up(&conf
->wait_for_overlap
);
2227 if (bi
) s
->to_read
--;
2228 while (bi
&& bi
->bi_sector
<
2229 sh
->dev
[i
].sector
+ STRIPE_SECTORS
) {
2230 struct bio
*nextbi
=
2231 r5_next_bio(bi
, sh
->dev
[i
].sector
);
2232 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
2233 if (!raid5_dec_bi_phys_segments(bi
)) {
2234 bi
->bi_next
= *return_bi
;
2240 spin_unlock_irq(&conf
->device_lock
);
2242 bitmap_endwrite(conf
->mddev
->bitmap
, sh
->sector
,
2243 STRIPE_SECTORS
, 0, 0);
2246 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2247 if (atomic_dec_and_test(&conf
->pending_full_writes
))
2248 md_wakeup_thread(conf
->mddev
->thread
);
2251 /* fetch_block5 - checks the given member device to see if its data needs
2252 * to be read or computed to satisfy a request.
2254 * Returns 1 when no more member devices need to be checked, otherwise returns
2255 * 0 to tell the loop in handle_stripe_fill5 to continue
2257 static int fetch_block5(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2258 int disk_idx
, int disks
)
2260 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2261 struct r5dev
*failed_dev
= &sh
->dev
[s
->failed_num
];
2263 /* is the data in this block needed, and can we get it? */
2264 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2265 !test_bit(R5_UPTODATE
, &dev
->flags
) &&
2267 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)) ||
2268 s
->syncing
|| s
->expanding
||
2270 (failed_dev
->toread
||
2271 (failed_dev
->towrite
&&
2272 !test_bit(R5_OVERWRITE
, &failed_dev
->flags
)))))) {
2273 /* We would like to get this block, possibly by computing it,
2274 * otherwise read it if the backing disk is insync
2276 if ((s
->uptodate
== disks
- 1) &&
2277 (s
->failed
&& disk_idx
== s
->failed_num
)) {
2278 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2279 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2280 set_bit(R5_Wantcompute
, &dev
->flags
);
2281 sh
->ops
.target
= disk_idx
;
2282 sh
->ops
.target2
= -1;
2284 /* Careful: from this point on 'uptodate' is in the eye
2285 * of raid_run_ops which services 'compute' operations
2286 * before writes. R5_Wantcompute flags a block that will
2287 * be R5_UPTODATE by the time it is needed for a
2288 * subsequent operation.
2291 return 1; /* uptodate + compute == disks */
2292 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
2293 set_bit(R5_LOCKED
, &dev
->flags
);
2294 set_bit(R5_Wantread
, &dev
->flags
);
2296 pr_debug("Reading block %d (sync=%d)\n", disk_idx
,
2305 * handle_stripe_fill5 - read or compute data to satisfy pending requests.
2307 static void handle_stripe_fill5(struct stripe_head
*sh
,
2308 struct stripe_head_state
*s
, int disks
)
2312 /* look for blocks to read/compute, skip this if a compute
2313 * is already in flight, or if the stripe contents are in the
2314 * midst of changing due to a write
2316 if (!test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) && !sh
->check_state
&&
2317 !sh
->reconstruct_state
)
2318 for (i
= disks
; i
--; )
2319 if (fetch_block5(sh
, s
, i
, disks
))
2321 set_bit(STRIPE_HANDLE
, &sh
->state
);
2324 /* fetch_block6 - checks the given member device to see if its data needs
2325 * to be read or computed to satisfy a request.
2327 * Returns 1 when no more member devices need to be checked, otherwise returns
2328 * 0 to tell the loop in handle_stripe_fill6 to continue
2330 static int fetch_block6(struct stripe_head
*sh
, struct stripe_head_state
*s
,
2331 struct r6_state
*r6s
, int disk_idx
, int disks
)
2333 struct r5dev
*dev
= &sh
->dev
[disk_idx
];
2334 struct r5dev
*fdev
[2] = { &sh
->dev
[r6s
->failed_num
[0]],
2335 &sh
->dev
[r6s
->failed_num
[1]] };
2337 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2338 !test_bit(R5_UPTODATE
, &dev
->flags
) &&
2340 (dev
->towrite
&& !test_bit(R5_OVERWRITE
, &dev
->flags
)) ||
2341 s
->syncing
|| s
->expanding
||
2343 (fdev
[0]->toread
|| s
->to_write
)) ||
2345 (fdev
[1]->toread
|| s
->to_write
)))) {
2346 /* we would like to get this block, possibly by computing it,
2347 * otherwise read it if the backing disk is insync
2349 BUG_ON(test_bit(R5_Wantcompute
, &dev
->flags
));
2350 BUG_ON(test_bit(R5_Wantread
, &dev
->flags
));
2351 if ((s
->uptodate
== disks
- 1) &&
2352 (s
->failed
&& (disk_idx
== r6s
->failed_num
[0] ||
2353 disk_idx
== r6s
->failed_num
[1]))) {
2354 /* have disk failed, and we're requested to fetch it;
2357 pr_debug("Computing stripe %llu block %d\n",
2358 (unsigned long long)sh
->sector
, disk_idx
);
2359 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2360 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2361 set_bit(R5_Wantcompute
, &dev
->flags
);
2362 sh
->ops
.target
= disk_idx
;
2363 sh
->ops
.target2
= -1; /* no 2nd target */
2367 } else if (s
->uptodate
== disks
-2 && s
->failed
>= 2) {
2368 /* Computing 2-failure is *very* expensive; only
2369 * do it if failed >= 2
2372 for (other
= disks
; other
--; ) {
2373 if (other
== disk_idx
)
2375 if (!test_bit(R5_UPTODATE
,
2376 &sh
->dev
[other
].flags
))
2380 pr_debug("Computing stripe %llu blocks %d,%d\n",
2381 (unsigned long long)sh
->sector
,
2383 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2384 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2385 set_bit(R5_Wantcompute
, &sh
->dev
[disk_idx
].flags
);
2386 set_bit(R5_Wantcompute
, &sh
->dev
[other
].flags
);
2387 sh
->ops
.target
= disk_idx
;
2388 sh
->ops
.target2
= other
;
2392 } else if (test_bit(R5_Insync
, &dev
->flags
)) {
2393 set_bit(R5_LOCKED
, &dev
->flags
);
2394 set_bit(R5_Wantread
, &dev
->flags
);
2396 pr_debug("Reading block %d (sync=%d)\n",
2397 disk_idx
, s
->syncing
);
2405 * handle_stripe_fill6 - read or compute data to satisfy pending requests.
2407 static void handle_stripe_fill6(struct stripe_head
*sh
,
2408 struct stripe_head_state
*s
, struct r6_state
*r6s
,
2413 /* look for blocks to read/compute, skip this if a compute
2414 * is already in flight, or if the stripe contents are in the
2415 * midst of changing due to a write
2417 if (!test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) && !sh
->check_state
&&
2418 !sh
->reconstruct_state
)
2419 for (i
= disks
; i
--; )
2420 if (fetch_block6(sh
, s
, r6s
, i
, disks
))
2422 set_bit(STRIPE_HANDLE
, &sh
->state
);
2426 /* handle_stripe_clean_event
2427 * any written block on an uptodate or failed drive can be returned.
2428 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
2429 * never LOCKED, so we don't need to test 'failed' directly.
2431 static void handle_stripe_clean_event(raid5_conf_t
*conf
,
2432 struct stripe_head
*sh
, int disks
, struct bio
**return_bi
)
2437 for (i
= disks
; i
--; )
2438 if (sh
->dev
[i
].written
) {
2440 if (!test_bit(R5_LOCKED
, &dev
->flags
) &&
2441 test_bit(R5_UPTODATE
, &dev
->flags
)) {
2442 /* We can return any write requests */
2443 struct bio
*wbi
, *wbi2
;
2445 pr_debug("Return write for disc %d\n", i
);
2446 spin_lock_irq(&conf
->device_lock
);
2448 dev
->written
= NULL
;
2449 while (wbi
&& wbi
->bi_sector
<
2450 dev
->sector
+ STRIPE_SECTORS
) {
2451 wbi2
= r5_next_bio(wbi
, dev
->sector
);
2452 if (!raid5_dec_bi_phys_segments(wbi
)) {
2453 md_write_end(conf
->mddev
);
2454 wbi
->bi_next
= *return_bi
;
2459 if (dev
->towrite
== NULL
)
2461 spin_unlock_irq(&conf
->device_lock
);
2463 bitmap_endwrite(conf
->mddev
->bitmap
,
2466 !test_bit(STRIPE_DEGRADED
, &sh
->state
),
2471 if (test_and_clear_bit(STRIPE_FULL_WRITE
, &sh
->state
))
2472 if (atomic_dec_and_test(&conf
->pending_full_writes
))
2473 md_wakeup_thread(conf
->mddev
->thread
);
2476 static void handle_stripe_dirtying5(raid5_conf_t
*conf
,
2477 struct stripe_head
*sh
, struct stripe_head_state
*s
, int disks
)
2479 int rmw
= 0, rcw
= 0, i
;
2480 for (i
= disks
; i
--; ) {
2481 /* would I have to read this buffer for read_modify_write */
2482 struct r5dev
*dev
= &sh
->dev
[i
];
2483 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
2484 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2485 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2486 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2487 if (test_bit(R5_Insync
, &dev
->flags
))
2490 rmw
+= 2*disks
; /* cannot read it */
2492 /* Would I have to read this buffer for reconstruct_write */
2493 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) && i
!= sh
->pd_idx
&&
2494 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2495 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2496 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2497 if (test_bit(R5_Insync
, &dev
->flags
)) rcw
++;
2502 pr_debug("for sector %llu, rmw=%d rcw=%d\n",
2503 (unsigned long long)sh
->sector
, rmw
, rcw
);
2504 set_bit(STRIPE_HANDLE
, &sh
->state
);
2505 if (rmw
< rcw
&& rmw
> 0)
2506 /* prefer read-modify-write, but need to get some data */
2507 for (i
= disks
; i
--; ) {
2508 struct r5dev
*dev
= &sh
->dev
[i
];
2509 if ((dev
->towrite
|| i
== sh
->pd_idx
) &&
2510 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2511 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2512 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
2513 test_bit(R5_Insync
, &dev
->flags
)) {
2515 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2516 pr_debug("Read_old block "
2517 "%d for r-m-w\n", i
);
2518 set_bit(R5_LOCKED
, &dev
->flags
);
2519 set_bit(R5_Wantread
, &dev
->flags
);
2522 set_bit(STRIPE_DELAYED
, &sh
->state
);
2523 set_bit(STRIPE_HANDLE
, &sh
->state
);
2527 if (rcw
<= rmw
&& rcw
> 0)
2528 /* want reconstruct write, but need to get some data */
2529 for (i
= disks
; i
--; ) {
2530 struct r5dev
*dev
= &sh
->dev
[i
];
2531 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
2533 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2534 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2535 test_bit(R5_Wantcompute
, &dev
->flags
)) &&
2536 test_bit(R5_Insync
, &dev
->flags
)) {
2538 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2539 pr_debug("Read_old block "
2540 "%d for Reconstruct\n", i
);
2541 set_bit(R5_LOCKED
, &dev
->flags
);
2542 set_bit(R5_Wantread
, &dev
->flags
);
2545 set_bit(STRIPE_DELAYED
, &sh
->state
);
2546 set_bit(STRIPE_HANDLE
, &sh
->state
);
2550 /* now if nothing is locked, and if we have enough data,
2551 * we can start a write request
2553 /* since handle_stripe can be called at any time we need to handle the
2554 * case where a compute block operation has been submitted and then a
2555 * subsequent call wants to start a write request. raid_run_ops only
2556 * handles the case where compute block and reconstruct are requested
2557 * simultaneously. If this is not the case then new writes need to be
2558 * held off until the compute completes.
2560 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
2561 (s
->locked
== 0 && (rcw
== 0 || rmw
== 0) &&
2562 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)))
2563 schedule_reconstruction(sh
, s
, rcw
== 0, 0);
2566 static void handle_stripe_dirtying6(raid5_conf_t
*conf
,
2567 struct stripe_head
*sh
, struct stripe_head_state
*s
,
2568 struct r6_state
*r6s
, int disks
)
2570 int rcw
= 0, pd_idx
= sh
->pd_idx
, i
;
2571 int qd_idx
= sh
->qd_idx
;
2573 set_bit(STRIPE_HANDLE
, &sh
->state
);
2574 for (i
= disks
; i
--; ) {
2575 struct r5dev
*dev
= &sh
->dev
[i
];
2576 /* check if we haven't enough data */
2577 if (!test_bit(R5_OVERWRITE
, &dev
->flags
) &&
2578 i
!= pd_idx
&& i
!= qd_idx
&&
2579 !test_bit(R5_LOCKED
, &dev
->flags
) &&
2580 !(test_bit(R5_UPTODATE
, &dev
->flags
) ||
2581 test_bit(R5_Wantcompute
, &dev
->flags
))) {
2583 if (!test_bit(R5_Insync
, &dev
->flags
))
2584 continue; /* it's a failed drive */
2587 test_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
)) {
2588 pr_debug("Read_old stripe %llu "
2589 "block %d for Reconstruct\n",
2590 (unsigned long long)sh
->sector
, i
);
2591 set_bit(R5_LOCKED
, &dev
->flags
);
2592 set_bit(R5_Wantread
, &dev
->flags
);
2595 pr_debug("Request delayed stripe %llu "
2596 "block %d for Reconstruct\n",
2597 (unsigned long long)sh
->sector
, i
);
2598 set_bit(STRIPE_DELAYED
, &sh
->state
);
2599 set_bit(STRIPE_HANDLE
, &sh
->state
);
2603 /* now if nothing is locked, and if we have enough data, we can start a
2606 if ((s
->req_compute
|| !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
)) &&
2607 s
->locked
== 0 && rcw
== 0 &&
2608 !test_bit(STRIPE_BIT_DELAY
, &sh
->state
)) {
2609 schedule_reconstruction(sh
, s
, 1, 0);
2613 static void handle_parity_checks5(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2614 struct stripe_head_state
*s
, int disks
)
2616 struct r5dev
*dev
= NULL
;
2618 set_bit(STRIPE_HANDLE
, &sh
->state
);
2620 switch (sh
->check_state
) {
2621 case check_state_idle
:
2622 /* start a new check operation if there are no failures */
2623 if (s
->failed
== 0) {
2624 BUG_ON(s
->uptodate
!= disks
);
2625 sh
->check_state
= check_state_run
;
2626 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
2627 clear_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
);
2631 dev
= &sh
->dev
[s
->failed_num
];
2633 case check_state_compute_result
:
2634 sh
->check_state
= check_state_idle
;
2636 dev
= &sh
->dev
[sh
->pd_idx
];
2638 /* check that a write has not made the stripe insync */
2639 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
2642 /* either failed parity check, or recovery is happening */
2643 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
2644 BUG_ON(s
->uptodate
!= disks
);
2646 set_bit(R5_LOCKED
, &dev
->flags
);
2648 set_bit(R5_Wantwrite
, &dev
->flags
);
2650 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2651 set_bit(STRIPE_INSYNC
, &sh
->state
);
2653 case check_state_run
:
2654 break; /* we will be called again upon completion */
2655 case check_state_check_result
:
2656 sh
->check_state
= check_state_idle
;
2658 /* if a failure occurred during the check operation, leave
2659 * STRIPE_INSYNC not set and let the stripe be handled again
2664 /* handle a successful check operation, if parity is correct
2665 * we are done. Otherwise update the mismatch count and repair
2666 * parity if !MD_RECOVERY_CHECK
2668 if ((sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) == 0)
2669 /* parity is correct (on disc,
2670 * not in buffer any more)
2672 set_bit(STRIPE_INSYNC
, &sh
->state
);
2674 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2675 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2676 /* don't try to repair!! */
2677 set_bit(STRIPE_INSYNC
, &sh
->state
);
2679 sh
->check_state
= check_state_compute_run
;
2680 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2681 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2682 set_bit(R5_Wantcompute
,
2683 &sh
->dev
[sh
->pd_idx
].flags
);
2684 sh
->ops
.target
= sh
->pd_idx
;
2685 sh
->ops
.target2
= -1;
2690 case check_state_compute_run
:
2693 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
2694 __func__
, sh
->check_state
,
2695 (unsigned long long) sh
->sector
);
2701 static void handle_parity_checks6(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2702 struct stripe_head_state
*s
,
2703 struct r6_state
*r6s
, int disks
)
2705 int pd_idx
= sh
->pd_idx
;
2706 int qd_idx
= sh
->qd_idx
;
2709 set_bit(STRIPE_HANDLE
, &sh
->state
);
2711 BUG_ON(s
->failed
> 2);
2713 /* Want to check and possibly repair P and Q.
2714 * However there could be one 'failed' device, in which
2715 * case we can only check one of them, possibly using the
2716 * other to generate missing data
2719 switch (sh
->check_state
) {
2720 case check_state_idle
:
2721 /* start a new check operation if there are < 2 failures */
2722 if (s
->failed
== r6s
->q_failed
) {
2723 /* The only possible failed device holds Q, so it
2724 * makes sense to check P (If anything else were failed,
2725 * we would have used P to recreate it).
2727 sh
->check_state
= check_state_run
;
2729 if (!r6s
->q_failed
&& s
->failed
< 2) {
2730 /* Q is not failed, and we didn't use it to generate
2731 * anything, so it makes sense to check it
2733 if (sh
->check_state
== check_state_run
)
2734 sh
->check_state
= check_state_run_pq
;
2736 sh
->check_state
= check_state_run_q
;
2739 /* discard potentially stale zero_sum_result */
2740 sh
->ops
.zero_sum_result
= 0;
2742 if (sh
->check_state
== check_state_run
) {
2743 /* async_xor_zero_sum destroys the contents of P */
2744 clear_bit(R5_UPTODATE
, &sh
->dev
[pd_idx
].flags
);
2747 if (sh
->check_state
>= check_state_run
&&
2748 sh
->check_state
<= check_state_run_pq
) {
2749 /* async_syndrome_zero_sum preserves P and Q, so
2750 * no need to mark them !uptodate here
2752 set_bit(STRIPE_OP_CHECK
, &s
->ops_request
);
2756 /* we have 2-disk failure */
2757 BUG_ON(s
->failed
!= 2);
2759 case check_state_compute_result
:
2760 sh
->check_state
= check_state_idle
;
2762 /* check that a write has not made the stripe insync */
2763 if (test_bit(STRIPE_INSYNC
, &sh
->state
))
2766 /* now write out any block on a failed drive,
2767 * or P or Q if they were recomputed
2769 BUG_ON(s
->uptodate
< disks
- 1); /* We don't need Q to recover */
2770 if (s
->failed
== 2) {
2771 dev
= &sh
->dev
[r6s
->failed_num
[1]];
2773 set_bit(R5_LOCKED
, &dev
->flags
);
2774 set_bit(R5_Wantwrite
, &dev
->flags
);
2776 if (s
->failed
>= 1) {
2777 dev
= &sh
->dev
[r6s
->failed_num
[0]];
2779 set_bit(R5_LOCKED
, &dev
->flags
);
2780 set_bit(R5_Wantwrite
, &dev
->flags
);
2782 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2783 dev
= &sh
->dev
[pd_idx
];
2785 set_bit(R5_LOCKED
, &dev
->flags
);
2786 set_bit(R5_Wantwrite
, &dev
->flags
);
2788 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2789 dev
= &sh
->dev
[qd_idx
];
2791 set_bit(R5_LOCKED
, &dev
->flags
);
2792 set_bit(R5_Wantwrite
, &dev
->flags
);
2794 clear_bit(STRIPE_DEGRADED
, &sh
->state
);
2796 set_bit(STRIPE_INSYNC
, &sh
->state
);
2798 case check_state_run
:
2799 case check_state_run_q
:
2800 case check_state_run_pq
:
2801 break; /* we will be called again upon completion */
2802 case check_state_check_result
:
2803 sh
->check_state
= check_state_idle
;
2805 /* handle a successful check operation, if parity is correct
2806 * we are done. Otherwise update the mismatch count and repair
2807 * parity if !MD_RECOVERY_CHECK
2809 if (sh
->ops
.zero_sum_result
== 0) {
2810 /* both parities are correct */
2812 set_bit(STRIPE_INSYNC
, &sh
->state
);
2814 /* in contrast to the raid5 case we can validate
2815 * parity, but still have a failure to write
2818 sh
->check_state
= check_state_compute_result
;
2819 /* Returning at this point means that we may go
2820 * off and bring p and/or q uptodate again so
2821 * we make sure to check zero_sum_result again
2822 * to verify if p or q need writeback
2826 conf
->mddev
->resync_mismatches
+= STRIPE_SECTORS
;
2827 if (test_bit(MD_RECOVERY_CHECK
, &conf
->mddev
->recovery
))
2828 /* don't try to repair!! */
2829 set_bit(STRIPE_INSYNC
, &sh
->state
);
2831 int *target
= &sh
->ops
.target
;
2833 sh
->ops
.target
= -1;
2834 sh
->ops
.target2
= -1;
2835 sh
->check_state
= check_state_compute_run
;
2836 set_bit(STRIPE_COMPUTE_RUN
, &sh
->state
);
2837 set_bit(STRIPE_OP_COMPUTE_BLK
, &s
->ops_request
);
2838 if (sh
->ops
.zero_sum_result
& SUM_CHECK_P_RESULT
) {
2839 set_bit(R5_Wantcompute
,
2840 &sh
->dev
[pd_idx
].flags
);
2842 target
= &sh
->ops
.target2
;
2845 if (sh
->ops
.zero_sum_result
& SUM_CHECK_Q_RESULT
) {
2846 set_bit(R5_Wantcompute
,
2847 &sh
->dev
[qd_idx
].flags
);
2854 case check_state_compute_run
:
2857 printk(KERN_ERR
"%s: unknown check_state: %d sector: %llu\n",
2858 __func__
, sh
->check_state
,
2859 (unsigned long long) sh
->sector
);
2864 static void handle_stripe_expansion(raid5_conf_t
*conf
, struct stripe_head
*sh
,
2865 struct r6_state
*r6s
)
2869 /* We have read all the blocks in this stripe and now we need to
2870 * copy some of them into a target stripe for expand.
2872 struct dma_async_tx_descriptor
*tx
= NULL
;
2873 clear_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2874 for (i
= 0; i
< sh
->disks
; i
++)
2875 if (i
!= sh
->pd_idx
&& i
!= sh
->qd_idx
) {
2877 struct stripe_head
*sh2
;
2878 struct async_submit_ctl submit
;
2880 sector_t bn
= compute_blocknr(sh
, i
, 1);
2881 sector_t s
= raid5_compute_sector(conf
, bn
, 0,
2883 sh2
= get_active_stripe(conf
, s
, 0, 1, 1);
2885 /* so far only the early blocks of this stripe
2886 * have been requested. When later blocks
2887 * get requested, we will try again
2890 if (!test_bit(STRIPE_EXPANDING
, &sh2
->state
) ||
2891 test_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
)) {
2892 /* must have already done this block */
2893 release_stripe(sh2
);
2897 /* place all the copies on one channel */
2898 init_async_submit(&submit
, 0, tx
, NULL
, NULL
, NULL
);
2899 tx
= async_memcpy(sh2
->dev
[dd_idx
].page
,
2900 sh
->dev
[i
].page
, 0, 0, STRIPE_SIZE
,
2903 set_bit(R5_Expanded
, &sh2
->dev
[dd_idx
].flags
);
2904 set_bit(R5_UPTODATE
, &sh2
->dev
[dd_idx
].flags
);
2905 for (j
= 0; j
< conf
->raid_disks
; j
++)
2906 if (j
!= sh2
->pd_idx
&&
2907 (!r6s
|| j
!= sh2
->qd_idx
) &&
2908 !test_bit(R5_Expanded
, &sh2
->dev
[j
].flags
))
2910 if (j
== conf
->raid_disks
) {
2911 set_bit(STRIPE_EXPAND_READY
, &sh2
->state
);
2912 set_bit(STRIPE_HANDLE
, &sh2
->state
);
2914 release_stripe(sh2
);
2917 /* done submitting copies, wait for them to complete */
2920 dma_wait_for_async_tx(tx
);
2926 * handle_stripe - do things to a stripe.
2928 * We lock the stripe and then examine the state of various bits
2929 * to see what needs to be done.
2931 * return some read request which now have data
2932 * return some write requests which are safely on disc
2933 * schedule a read on some buffers
2934 * schedule a write of some buffers
2935 * return confirmation of parity correctness
2937 * buffers are taken off read_list or write_list, and bh_cache buffers
2938 * get BH_Lock set before the stripe lock is released.
2942 static void handle_stripe5(struct stripe_head
*sh
)
2944 raid5_conf_t
*conf
= sh
->raid_conf
;
2945 int disks
= sh
->disks
, i
;
2946 struct bio
*return_bi
= NULL
;
2947 struct stripe_head_state s
;
2949 mdk_rdev_t
*blocked_rdev
= NULL
;
2951 int dec_preread_active
= 0;
2953 memset(&s
, 0, sizeof(s
));
2954 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d check:%d "
2955 "reconstruct:%d\n", (unsigned long long)sh
->sector
, sh
->state
,
2956 atomic_read(&sh
->count
), sh
->pd_idx
, sh
->check_state
,
2957 sh
->reconstruct_state
);
2959 spin_lock(&sh
->lock
);
2960 clear_bit(STRIPE_HANDLE
, &sh
->state
);
2961 clear_bit(STRIPE_DELAYED
, &sh
->state
);
2963 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
2964 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
2965 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
2967 /* Now to look around and see what can be done */
2969 for (i
=disks
; i
--; ) {
2973 clear_bit(R5_Insync
, &dev
->flags
);
2975 pr_debug("check %d: state 0x%lx toread %p read %p write %p "
2976 "written %p\n", i
, dev
->flags
, dev
->toread
, dev
->read
,
2977 dev
->towrite
, dev
->written
);
2979 /* maybe we can request a biofill operation
2981 * new wantfill requests are only permitted while
2982 * ops_complete_biofill is guaranteed to be inactive
2984 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
2985 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
2986 set_bit(R5_Wantfill
, &dev
->flags
);
2988 /* now count some things */
2989 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
2990 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
2991 if (test_bit(R5_Wantcompute
, &dev
->flags
)) s
.compute
++;
2993 if (test_bit(R5_Wantfill
, &dev
->flags
))
2995 else if (dev
->toread
)
2999 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3004 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3005 if (blocked_rdev
== NULL
&&
3006 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3007 blocked_rdev
= rdev
;
3008 atomic_inc(&rdev
->nr_pending
);
3010 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3011 /* The ReadError flag will just be confusing now */
3012 clear_bit(R5_ReadError
, &dev
->flags
);
3013 clear_bit(R5_ReWrite
, &dev
->flags
);
3015 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3016 || test_bit(R5_ReadError
, &dev
->flags
)) {
3020 set_bit(R5_Insync
, &dev
->flags
);
3024 if (unlikely(blocked_rdev
)) {
3025 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3026 s
.to_write
|| s
.written
) {
3027 set_bit(STRIPE_HANDLE
, &sh
->state
);
3030 /* There is nothing for the blocked_rdev to block */
3031 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3032 blocked_rdev
= NULL
;
3035 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3036 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3037 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3040 pr_debug("locked=%d uptodate=%d to_read=%d"
3041 " to_write=%d failed=%d failed_num=%d\n",
3042 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
,
3043 s
.failed
, s
.failed_num
);
3044 /* check if the array has lost two devices and, if so, some requests might
3047 if (s
.failed
> 1 && s
.to_read
+s
.to_write
+s
.written
)
3048 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3049 if (s
.failed
> 1 && s
.syncing
) {
3050 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3051 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3055 /* might be able to return some write requests if the parity block
3056 * is safe, or on a failed drive
3058 dev
= &sh
->dev
[sh
->pd_idx
];
3060 ((test_bit(R5_Insync
, &dev
->flags
) &&
3061 !test_bit(R5_LOCKED
, &dev
->flags
) &&
3062 test_bit(R5_UPTODATE
, &dev
->flags
)) ||
3063 (s
.failed
== 1 && s
.failed_num
== sh
->pd_idx
)))
3064 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3066 /* Now we might consider reading some blocks, either to check/generate
3067 * parity, or to satisfy requests
3068 * or to load a block that is being partially written.
3070 if (s
.to_read
|| s
.non_overwrite
||
3071 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3072 handle_stripe_fill5(sh
, &s
, disks
);
3074 /* Now we check to see if any write operations have recently
3078 if (sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
)
3080 if (sh
->reconstruct_state
== reconstruct_state_drain_result
||
3081 sh
->reconstruct_state
== reconstruct_state_prexor_drain_result
) {
3082 sh
->reconstruct_state
= reconstruct_state_idle
;
3084 /* All the 'written' buffers and the parity block are ready to
3085 * be written back to disk
3087 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3088 for (i
= disks
; i
--; ) {
3090 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3091 (i
== sh
->pd_idx
|| dev
->written
)) {
3092 pr_debug("Writing block %d\n", i
);
3093 set_bit(R5_Wantwrite
, &dev
->flags
);
3096 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3097 (i
== sh
->pd_idx
&& s
.failed
== 0))
3098 set_bit(STRIPE_INSYNC
, &sh
->state
);
3101 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3102 dec_preread_active
= 1;
3105 /* Now to consider new write requests and what else, if anything
3106 * should be read. We do not handle new writes when:
3107 * 1/ A 'write' operation (copy+xor) is already in flight.
3108 * 2/ A 'check' operation is in flight, as it may clobber the parity
3111 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3112 handle_stripe_dirtying5(conf
, sh
, &s
, disks
);
3114 /* maybe we need to check and possibly fix the parity for this stripe
3115 * Any reads will already have been scheduled, so we just see if enough
3116 * data is available. The parity check is held off while parity
3117 * dependent operations are in flight.
3119 if (sh
->check_state
||
3120 (s
.syncing
&& s
.locked
== 0 &&
3121 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3122 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3123 handle_parity_checks5(conf
, sh
, &s
, disks
);
3125 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3126 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3127 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3130 /* If the failed drive is just a ReadError, then we might need to progress
3131 * the repair/check process
3133 if (s
.failed
== 1 && !conf
->mddev
->ro
&&
3134 test_bit(R5_ReadError
, &sh
->dev
[s
.failed_num
].flags
)
3135 && !test_bit(R5_LOCKED
, &sh
->dev
[s
.failed_num
].flags
)
3136 && test_bit(R5_UPTODATE
, &sh
->dev
[s
.failed_num
].flags
)
3138 dev
= &sh
->dev
[s
.failed_num
];
3139 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3140 set_bit(R5_Wantwrite
, &dev
->flags
);
3141 set_bit(R5_ReWrite
, &dev
->flags
);
3142 set_bit(R5_LOCKED
, &dev
->flags
);
3145 /* let's read it back */
3146 set_bit(R5_Wantread
, &dev
->flags
);
3147 set_bit(R5_LOCKED
, &dev
->flags
);
3152 /* Finish reconstruct operations initiated by the expansion process */
3153 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3154 struct stripe_head
*sh2
3155 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3156 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3157 /* sh cannot be written until sh2 has been read.
3158 * so arrange for sh to be delayed a little
3160 set_bit(STRIPE_DELAYED
, &sh
->state
);
3161 set_bit(STRIPE_HANDLE
, &sh
->state
);
3162 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3164 atomic_inc(&conf
->preread_active_stripes
);
3165 release_stripe(sh2
);
3169 release_stripe(sh2
);
3171 sh
->reconstruct_state
= reconstruct_state_idle
;
3172 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3173 for (i
= conf
->raid_disks
; i
--; ) {
3174 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3175 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3180 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3181 !sh
->reconstruct_state
) {
3182 /* Need to write out all blocks after computing parity */
3183 sh
->disks
= conf
->raid_disks
;
3184 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3185 schedule_reconstruction(sh
, &s
, 1, 1);
3186 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3187 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3188 atomic_dec(&conf
->reshape_stripes
);
3189 wake_up(&conf
->wait_for_overlap
);
3190 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3193 if (s
.expanding
&& s
.locked
== 0 &&
3194 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3195 handle_stripe_expansion(conf
, sh
, NULL
);
3198 spin_unlock(&sh
->lock
);
3200 /* wait for this device to become unblocked */
3201 if (unlikely(blocked_rdev
))
3202 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3205 raid_run_ops(sh
, s
.ops_request
);
3209 if (dec_preread_active
) {
3210 /* We delay this until after ops_run_io so that if make_request
3211 * is waiting on a barrier, it won't continue until the writes
3212 * have actually been submitted.
3214 atomic_dec(&conf
->preread_active_stripes
);
3215 if (atomic_read(&conf
->preread_active_stripes
) <
3217 md_wakeup_thread(conf
->mddev
->thread
);
3219 return_io(return_bi
);
3222 static void handle_stripe6(struct stripe_head
*sh
)
3224 raid5_conf_t
*conf
= sh
->raid_conf
;
3225 int disks
= sh
->disks
;
3226 struct bio
*return_bi
= NULL
;
3227 int i
, pd_idx
= sh
->pd_idx
, qd_idx
= sh
->qd_idx
;
3228 struct stripe_head_state s
;
3229 struct r6_state r6s
;
3230 struct r5dev
*dev
, *pdev
, *qdev
;
3231 mdk_rdev_t
*blocked_rdev
= NULL
;
3232 int dec_preread_active
= 0;
3234 pr_debug("handling stripe %llu, state=%#lx cnt=%d, "
3235 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n",
3236 (unsigned long long)sh
->sector
, sh
->state
,
3237 atomic_read(&sh
->count
), pd_idx
, qd_idx
,
3238 sh
->check_state
, sh
->reconstruct_state
);
3239 memset(&s
, 0, sizeof(s
));
3241 spin_lock(&sh
->lock
);
3242 clear_bit(STRIPE_HANDLE
, &sh
->state
);
3243 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3245 s
.syncing
= test_bit(STRIPE_SYNCING
, &sh
->state
);
3246 s
.expanding
= test_bit(STRIPE_EXPAND_SOURCE
, &sh
->state
);
3247 s
.expanded
= test_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3248 /* Now to look around and see what can be done */
3251 for (i
=disks
; i
--; ) {
3254 clear_bit(R5_Insync
, &dev
->flags
);
3256 pr_debug("check %d: state 0x%lx read %p write %p written %p\n",
3257 i
, dev
->flags
, dev
->toread
, dev
->towrite
, dev
->written
);
3258 /* maybe we can reply to a read
3260 * new wantfill requests are only permitted while
3261 * ops_complete_biofill is guaranteed to be inactive
3263 if (test_bit(R5_UPTODATE
, &dev
->flags
) && dev
->toread
&&
3264 !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
))
3265 set_bit(R5_Wantfill
, &dev
->flags
);
3267 /* now count some things */
3268 if (test_bit(R5_LOCKED
, &dev
->flags
)) s
.locked
++;
3269 if (test_bit(R5_UPTODATE
, &dev
->flags
)) s
.uptodate
++;
3270 if (test_bit(R5_Wantcompute
, &dev
->flags
)) {
3272 BUG_ON(s
.compute
> 2);
3275 if (test_bit(R5_Wantfill
, &dev
->flags
)) {
3277 } else if (dev
->toread
)
3281 if (!test_bit(R5_OVERWRITE
, &dev
->flags
))
3286 rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3287 if (blocked_rdev
== NULL
&&
3288 rdev
&& unlikely(test_bit(Blocked
, &rdev
->flags
))) {
3289 blocked_rdev
= rdev
;
3290 atomic_inc(&rdev
->nr_pending
);
3292 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)) {
3293 /* The ReadError flag will just be confusing now */
3294 clear_bit(R5_ReadError
, &dev
->flags
);
3295 clear_bit(R5_ReWrite
, &dev
->flags
);
3297 if (!rdev
|| !test_bit(In_sync
, &rdev
->flags
)
3298 || test_bit(R5_ReadError
, &dev
->flags
)) {
3300 r6s
.failed_num
[s
.failed
] = i
;
3303 set_bit(R5_Insync
, &dev
->flags
);
3307 if (unlikely(blocked_rdev
)) {
3308 if (s
.syncing
|| s
.expanding
|| s
.expanded
||
3309 s
.to_write
|| s
.written
) {
3310 set_bit(STRIPE_HANDLE
, &sh
->state
);
3313 /* There is nothing for the blocked_rdev to block */
3314 rdev_dec_pending(blocked_rdev
, conf
->mddev
);
3315 blocked_rdev
= NULL
;
3318 if (s
.to_fill
&& !test_bit(STRIPE_BIOFILL_RUN
, &sh
->state
)) {
3319 set_bit(STRIPE_OP_BIOFILL
, &s
.ops_request
);
3320 set_bit(STRIPE_BIOFILL_RUN
, &sh
->state
);
3323 pr_debug("locked=%d uptodate=%d to_read=%d"
3324 " to_write=%d failed=%d failed_num=%d,%d\n",
3325 s
.locked
, s
.uptodate
, s
.to_read
, s
.to_write
, s
.failed
,
3326 r6s
.failed_num
[0], r6s
.failed_num
[1]);
3327 /* check if the array has lost >2 devices and, if so, some requests
3328 * might need to be failed
3330 if (s
.failed
> 2 && s
.to_read
+s
.to_write
+s
.written
)
3331 handle_failed_stripe(conf
, sh
, &s
, disks
, &return_bi
);
3332 if (s
.failed
> 2 && s
.syncing
) {
3333 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,0);
3334 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3339 * might be able to return some write requests if the parity blocks
3340 * are safe, or on a failed drive
3342 pdev
= &sh
->dev
[pd_idx
];
3343 r6s
.p_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == pd_idx
)
3344 || (s
.failed
>= 2 && r6s
.failed_num
[1] == pd_idx
);
3345 qdev
= &sh
->dev
[qd_idx
];
3346 r6s
.q_failed
= (s
.failed
>= 1 && r6s
.failed_num
[0] == qd_idx
)
3347 || (s
.failed
>= 2 && r6s
.failed_num
[1] == qd_idx
);
3350 ( r6s
.p_failed
|| ((test_bit(R5_Insync
, &pdev
->flags
)
3351 && !test_bit(R5_LOCKED
, &pdev
->flags
)
3352 && test_bit(R5_UPTODATE
, &pdev
->flags
)))) &&
3353 ( r6s
.q_failed
|| ((test_bit(R5_Insync
, &qdev
->flags
)
3354 && !test_bit(R5_LOCKED
, &qdev
->flags
)
3355 && test_bit(R5_UPTODATE
, &qdev
->flags
)))))
3356 handle_stripe_clean_event(conf
, sh
, disks
, &return_bi
);
3358 /* Now we might consider reading some blocks, either to check/generate
3359 * parity, or to satisfy requests
3360 * or to load a block that is being partially written.
3362 if (s
.to_read
|| s
.non_overwrite
|| (s
.to_write
&& s
.failed
) ||
3363 (s
.syncing
&& (s
.uptodate
+ s
.compute
< disks
)) || s
.expanding
)
3364 handle_stripe_fill6(sh
, &s
, &r6s
, disks
);
3366 /* Now we check to see if any write operations have recently
3369 if (sh
->reconstruct_state
== reconstruct_state_drain_result
) {
3371 sh
->reconstruct_state
= reconstruct_state_idle
;
3372 /* All the 'written' buffers and the parity blocks are ready to
3373 * be written back to disk
3375 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[sh
->pd_idx
].flags
));
3376 BUG_ON(!test_bit(R5_UPTODATE
, &sh
->dev
[qd_idx
].flags
));
3377 for (i
= disks
; i
--; ) {
3379 if (test_bit(R5_LOCKED
, &dev
->flags
) &&
3380 (i
== sh
->pd_idx
|| i
== qd_idx
||
3382 pr_debug("Writing block %d\n", i
);
3383 BUG_ON(!test_bit(R5_UPTODATE
, &dev
->flags
));
3384 set_bit(R5_Wantwrite
, &dev
->flags
);
3385 if (!test_bit(R5_Insync
, &dev
->flags
) ||
3386 ((i
== sh
->pd_idx
|| i
== qd_idx
) &&
3388 set_bit(STRIPE_INSYNC
, &sh
->state
);
3391 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3392 dec_preread_active
= 1;
3395 /* Now to consider new write requests and what else, if anything
3396 * should be read. We do not handle new writes when:
3397 * 1/ A 'write' operation (copy+gen_syndrome) is already in flight.
3398 * 2/ A 'check' operation is in flight, as it may clobber the parity
3401 if (s
.to_write
&& !sh
->reconstruct_state
&& !sh
->check_state
)
3402 handle_stripe_dirtying6(conf
, sh
, &s
, &r6s
, disks
);
3404 /* maybe we need to check and possibly fix the parity for this stripe
3405 * Any reads will already have been scheduled, so we just see if enough
3406 * data is available. The parity check is held off while parity
3407 * dependent operations are in flight.
3409 if (sh
->check_state
||
3410 (s
.syncing
&& s
.locked
== 0 &&
3411 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
) &&
3412 !test_bit(STRIPE_INSYNC
, &sh
->state
)))
3413 handle_parity_checks6(conf
, sh
, &s
, &r6s
, disks
);
3415 if (s
.syncing
&& s
.locked
== 0 && test_bit(STRIPE_INSYNC
, &sh
->state
)) {
3416 md_done_sync(conf
->mddev
, STRIPE_SECTORS
,1);
3417 clear_bit(STRIPE_SYNCING
, &sh
->state
);
3420 /* If the failed drives are just a ReadError, then we might need
3421 * to progress the repair/check process
3423 if (s
.failed
<= 2 && !conf
->mddev
->ro
)
3424 for (i
= 0; i
< s
.failed
; i
++) {
3425 dev
= &sh
->dev
[r6s
.failed_num
[i
]];
3426 if (test_bit(R5_ReadError
, &dev
->flags
)
3427 && !test_bit(R5_LOCKED
, &dev
->flags
)
3428 && test_bit(R5_UPTODATE
, &dev
->flags
)
3430 if (!test_bit(R5_ReWrite
, &dev
->flags
)) {
3431 set_bit(R5_Wantwrite
, &dev
->flags
);
3432 set_bit(R5_ReWrite
, &dev
->flags
);
3433 set_bit(R5_LOCKED
, &dev
->flags
);
3436 /* let's read it back */
3437 set_bit(R5_Wantread
, &dev
->flags
);
3438 set_bit(R5_LOCKED
, &dev
->flags
);
3444 /* Finish reconstruct operations initiated by the expansion process */
3445 if (sh
->reconstruct_state
== reconstruct_state_result
) {
3446 sh
->reconstruct_state
= reconstruct_state_idle
;
3447 clear_bit(STRIPE_EXPANDING
, &sh
->state
);
3448 for (i
= conf
->raid_disks
; i
--; ) {
3449 set_bit(R5_Wantwrite
, &sh
->dev
[i
].flags
);
3450 set_bit(R5_LOCKED
, &sh
->dev
[i
].flags
);
3455 if (s
.expanded
&& test_bit(STRIPE_EXPANDING
, &sh
->state
) &&
3456 !sh
->reconstruct_state
) {
3457 struct stripe_head
*sh2
3458 = get_active_stripe(conf
, sh
->sector
, 1, 1, 1);
3459 if (sh2
&& test_bit(STRIPE_EXPAND_SOURCE
, &sh2
->state
)) {
3460 /* sh cannot be written until sh2 has been read.
3461 * so arrange for sh to be delayed a little
3463 set_bit(STRIPE_DELAYED
, &sh
->state
);
3464 set_bit(STRIPE_HANDLE
, &sh
->state
);
3465 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
,
3467 atomic_inc(&conf
->preread_active_stripes
);
3468 release_stripe(sh2
);
3472 release_stripe(sh2
);
3474 /* Need to write out all blocks after computing P&Q */
3475 sh
->disks
= conf
->raid_disks
;
3476 stripe_set_idx(sh
->sector
, conf
, 0, sh
);
3477 schedule_reconstruction(sh
, &s
, 1, 1);
3478 } else if (s
.expanded
&& !sh
->reconstruct_state
&& s
.locked
== 0) {
3479 clear_bit(STRIPE_EXPAND_READY
, &sh
->state
);
3480 atomic_dec(&conf
->reshape_stripes
);
3481 wake_up(&conf
->wait_for_overlap
);
3482 md_done_sync(conf
->mddev
, STRIPE_SECTORS
, 1);
3485 if (s
.expanding
&& s
.locked
== 0 &&
3486 !test_bit(STRIPE_COMPUTE_RUN
, &sh
->state
))
3487 handle_stripe_expansion(conf
, sh
, &r6s
);
3490 spin_unlock(&sh
->lock
);
3492 /* wait for this device to become unblocked */
3493 if (unlikely(blocked_rdev
))
3494 md_wait_for_blocked_rdev(blocked_rdev
, conf
->mddev
);
3497 raid_run_ops(sh
, s
.ops_request
);
3502 if (dec_preread_active
) {
3503 /* We delay this until after ops_run_io so that if make_request
3504 * is waiting on a barrier, it won't continue until the writes
3505 * have actually been submitted.
3507 atomic_dec(&conf
->preread_active_stripes
);
3508 if (atomic_read(&conf
->preread_active_stripes
) <
3510 md_wakeup_thread(conf
->mddev
->thread
);
3513 return_io(return_bi
);
3516 static void handle_stripe(struct stripe_head
*sh
)
3518 if (sh
->raid_conf
->level
== 6)
3524 static void raid5_activate_delayed(raid5_conf_t
*conf
)
3526 if (atomic_read(&conf
->preread_active_stripes
) < IO_THRESHOLD
) {
3527 while (!list_empty(&conf
->delayed_list
)) {
3528 struct list_head
*l
= conf
->delayed_list
.next
;
3529 struct stripe_head
*sh
;
3530 sh
= list_entry(l
, struct stripe_head
, lru
);
3532 clear_bit(STRIPE_DELAYED
, &sh
->state
);
3533 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
3534 atomic_inc(&conf
->preread_active_stripes
);
3535 list_add_tail(&sh
->lru
, &conf
->hold_list
);
3538 blk_plug_device(conf
->mddev
->queue
);
3541 static void activate_bit_delay(raid5_conf_t
*conf
)
3543 /* device_lock is held */
3544 struct list_head head
;
3545 list_add(&head
, &conf
->bitmap_list
);
3546 list_del_init(&conf
->bitmap_list
);
3547 while (!list_empty(&head
)) {
3548 struct stripe_head
*sh
= list_entry(head
.next
, struct stripe_head
, lru
);
3549 list_del_init(&sh
->lru
);
3550 atomic_inc(&sh
->count
);
3551 __release_stripe(conf
, sh
);
3555 static void unplug_slaves(mddev_t
*mddev
)
3557 raid5_conf_t
*conf
= mddev
->private;
3559 int devs
= max(conf
->raid_disks
, conf
->previous_raid_disks
);
3562 for (i
= 0; i
< devs
; i
++) {
3563 mdk_rdev_t
*rdev
= rcu_dereference(conf
->disks
[i
].rdev
);
3564 if (rdev
&& !test_bit(Faulty
, &rdev
->flags
) && atomic_read(&rdev
->nr_pending
)) {
3565 struct request_queue
*r_queue
= bdev_get_queue(rdev
->bdev
);
3567 atomic_inc(&rdev
->nr_pending
);
3570 blk_unplug(r_queue
);
3572 rdev_dec_pending(rdev
, mddev
);
3579 static void raid5_unplug_device(struct request_queue
*q
)
3581 mddev_t
*mddev
= q
->queuedata
;
3582 raid5_conf_t
*conf
= mddev
->private;
3583 unsigned long flags
;
3585 spin_lock_irqsave(&conf
->device_lock
, flags
);
3587 if (blk_remove_plug(q
)) {
3589 raid5_activate_delayed(conf
);
3591 md_wakeup_thread(mddev
->thread
);
3593 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3595 unplug_slaves(mddev
);
3598 static int raid5_congested(void *data
, int bits
)
3600 mddev_t
*mddev
= data
;
3601 raid5_conf_t
*conf
= mddev
->private;
3603 /* No difference between reads and writes. Just check
3604 * how busy the stripe_cache is
3607 if (mddev_congested(mddev
, bits
))
3609 if (conf
->inactive_blocked
)
3613 if (list_empty_careful(&conf
->inactive_list
))
3619 /* We want read requests to align with chunks where possible,
3620 * but write requests don't need to.
3622 static int raid5_mergeable_bvec(struct request_queue
*q
,
3623 struct bvec_merge_data
*bvm
,
3624 struct bio_vec
*biovec
)
3626 mddev_t
*mddev
= q
->queuedata
;
3627 sector_t sector
= bvm
->bi_sector
+ get_start_sect(bvm
->bi_bdev
);
3629 unsigned int chunk_sectors
= mddev
->chunk_sectors
;
3630 unsigned int bio_sectors
= bvm
->bi_size
>> 9;
3632 if ((bvm
->bi_rw
& 1) == WRITE
)
3633 return biovec
->bv_len
; /* always allow writes to be mergeable */
3635 if (mddev
->new_chunk_sectors
< mddev
->chunk_sectors
)
3636 chunk_sectors
= mddev
->new_chunk_sectors
;
3637 max
= (chunk_sectors
- ((sector
& (chunk_sectors
- 1)) + bio_sectors
)) << 9;
3638 if (max
< 0) max
= 0;
3639 if (max
<= biovec
->bv_len
&& bio_sectors
== 0)
3640 return biovec
->bv_len
;
3646 static int in_chunk_boundary(mddev_t
*mddev
, struct bio
*bio
)
3648 sector_t sector
= bio
->bi_sector
+ get_start_sect(bio
->bi_bdev
);
3649 unsigned int chunk_sectors
= mddev
->chunk_sectors
;
3650 unsigned int bio_sectors
= bio
->bi_size
>> 9;
3652 if (mddev
->new_chunk_sectors
< mddev
->chunk_sectors
)
3653 chunk_sectors
= mddev
->new_chunk_sectors
;
3654 return chunk_sectors
>=
3655 ((sector
& (chunk_sectors
- 1)) + bio_sectors
);
3659 * add bio to the retry LIFO ( in O(1) ... we are in interrupt )
3660 * later sampled by raid5d.
3662 static void add_bio_to_retry(struct bio
*bi
,raid5_conf_t
*conf
)
3664 unsigned long flags
;
3666 spin_lock_irqsave(&conf
->device_lock
, flags
);
3668 bi
->bi_next
= conf
->retry_read_aligned_list
;
3669 conf
->retry_read_aligned_list
= bi
;
3671 spin_unlock_irqrestore(&conf
->device_lock
, flags
);
3672 md_wakeup_thread(conf
->mddev
->thread
);
3676 static struct bio
*remove_bio_from_retry(raid5_conf_t
*conf
)
3680 bi
= conf
->retry_read_aligned
;
3682 conf
->retry_read_aligned
= NULL
;
3685 bi
= conf
->retry_read_aligned_list
;
3687 conf
->retry_read_aligned_list
= bi
->bi_next
;
3690 * this sets the active strip count to 1 and the processed
3691 * strip count to zero (upper 8 bits)
3693 bi
->bi_phys_segments
= 1; /* biased count of active stripes */
3701 * The "raid5_align_endio" should check if the read succeeded and if it
3702 * did, call bio_endio on the original bio (having bio_put the new bio
3704 * If the read failed..
3706 static void raid5_align_endio(struct bio
*bi
, int error
)
3708 struct bio
* raid_bi
= bi
->bi_private
;
3711 int uptodate
= test_bit(BIO_UPTODATE
, &bi
->bi_flags
);
3716 mddev
= raid_bi
->bi_bdev
->bd_disk
->queue
->queuedata
;
3717 conf
= mddev
->private;
3718 rdev
= (void*)raid_bi
->bi_next
;
3719 raid_bi
->bi_next
= NULL
;
3721 rdev_dec_pending(rdev
, conf
->mddev
);
3723 if (!error
&& uptodate
) {
3724 bio_endio(raid_bi
, 0);
3725 if (atomic_dec_and_test(&conf
->active_aligned_reads
))
3726 wake_up(&conf
->wait_for_stripe
);
3731 pr_debug("raid5_align_endio : io error...handing IO for a retry\n");
3733 add_bio_to_retry(raid_bi
, conf
);
3736 static int bio_fits_rdev(struct bio
*bi
)
3738 struct request_queue
*q
= bdev_get_queue(bi
->bi_bdev
);
3740 if ((bi
->bi_size
>>9) > queue_max_sectors(q
))
3742 blk_recount_segments(q
, bi
);
3743 if (bi
->bi_phys_segments
> queue_max_segments(q
))
3746 if (q
->merge_bvec_fn
)
3747 /* it's too hard to apply the merge_bvec_fn at this stage,
3756 static int chunk_aligned_read(struct request_queue
*q
, struct bio
* raid_bio
)
3758 mddev_t
*mddev
= q
->queuedata
;
3759 raid5_conf_t
*conf
= mddev
->private;
3761 struct bio
* align_bi
;
3764 if (!in_chunk_boundary(mddev
, raid_bio
)) {
3765 pr_debug("chunk_aligned_read : non aligned\n");
3769 * use bio_clone to make a copy of the bio
3771 align_bi
= bio_clone(raid_bio
, GFP_NOIO
);
3775 * set bi_end_io to a new function, and set bi_private to the
3778 align_bi
->bi_end_io
= raid5_align_endio
;
3779 align_bi
->bi_private
= raid_bio
;
3783 align_bi
->bi_sector
= raid5_compute_sector(conf
, raid_bio
->bi_sector
,
3788 rdev
= rcu_dereference(conf
->disks
[dd_idx
].rdev
);
3789 if (rdev
&& test_bit(In_sync
, &rdev
->flags
)) {
3790 atomic_inc(&rdev
->nr_pending
);
3792 raid_bio
->bi_next
= (void*)rdev
;
3793 align_bi
->bi_bdev
= rdev
->bdev
;
3794 align_bi
->bi_flags
&= ~(1 << BIO_SEG_VALID
);
3795 align_bi
->bi_sector
+= rdev
->data_offset
;
3797 if (!bio_fits_rdev(align_bi
)) {
3798 /* too big in some way */
3800 rdev_dec_pending(rdev
, mddev
);
3804 spin_lock_irq(&conf
->device_lock
);
3805 wait_event_lock_irq(conf
->wait_for_stripe
,
3807 conf
->device_lock
, /* nothing */);
3808 atomic_inc(&conf
->active_aligned_reads
);
3809 spin_unlock_irq(&conf
->device_lock
);
3811 generic_make_request(align_bi
);
3820 /* __get_priority_stripe - get the next stripe to process
3822 * Full stripe writes are allowed to pass preread active stripes up until
3823 * the bypass_threshold is exceeded. In general the bypass_count
3824 * increments when the handle_list is handled before the hold_list; however, it
3825 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a
3826 * stripe with in flight i/o. The bypass_count will be reset when the
3827 * head of the hold_list has changed, i.e. the head was promoted to the
3830 static struct stripe_head
*__get_priority_stripe(raid5_conf_t
*conf
)
3832 struct stripe_head
*sh
;
3834 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n",
3836 list_empty(&conf
->handle_list
) ? "empty" : "busy",
3837 list_empty(&conf
->hold_list
) ? "empty" : "busy",
3838 atomic_read(&conf
->pending_full_writes
), conf
->bypass_count
);
3840 if (!list_empty(&conf
->handle_list
)) {
3841 sh
= list_entry(conf
->handle_list
.next
, typeof(*sh
), lru
);
3843 if (list_empty(&conf
->hold_list
))
3844 conf
->bypass_count
= 0;
3845 else if (!test_bit(STRIPE_IO_STARTED
, &sh
->state
)) {
3846 if (conf
->hold_list
.next
== conf
->last_hold
)
3847 conf
->bypass_count
++;
3849 conf
->last_hold
= conf
->hold_list
.next
;
3850 conf
->bypass_count
-= conf
->bypass_threshold
;
3851 if (conf
->bypass_count
< 0)
3852 conf
->bypass_count
= 0;
3855 } else if (!list_empty(&conf
->hold_list
) &&
3856 ((conf
->bypass_threshold
&&
3857 conf
->bypass_count
> conf
->bypass_threshold
) ||
3858 atomic_read(&conf
->pending_full_writes
) == 0)) {
3859 sh
= list_entry(conf
->hold_list
.next
,
3861 conf
->bypass_count
-= conf
->bypass_threshold
;
3862 if (conf
->bypass_count
< 0)
3863 conf
->bypass_count
= 0;
3867 list_del_init(&sh
->lru
);
3868 atomic_inc(&sh
->count
);
3869 BUG_ON(atomic_read(&sh
->count
) != 1);
3873 static int make_request(struct request_queue
*q
, struct bio
* bi
)
3875 mddev_t
*mddev
= q
->queuedata
;
3876 raid5_conf_t
*conf
= mddev
->private;
3878 sector_t new_sector
;
3879 sector_t logical_sector
, last_sector
;
3880 struct stripe_head
*sh
;
3881 const int rw
= bio_data_dir(bi
);
3884 if (unlikely(bio_rw_flagged(bi
, BIO_RW_BARRIER
))) {
3885 /* Drain all pending writes. We only really need
3886 * to ensure they have been submitted, but this is
3889 mddev
->pers
->quiesce(mddev
, 1);
3890 mddev
->pers
->quiesce(mddev
, 0);
3891 md_barrier_request(mddev
, bi
);
3895 md_write_start(mddev
, bi
);
3897 cpu
= part_stat_lock();
3898 part_stat_inc(cpu
, &mddev
->gendisk
->part0
, ios
[rw
]);
3899 part_stat_add(cpu
, &mddev
->gendisk
->part0
, sectors
[rw
],
3904 mddev
->reshape_position
== MaxSector
&&
3905 chunk_aligned_read(q
,bi
))
3908 logical_sector
= bi
->bi_sector
& ~((sector_t
)STRIPE_SECTORS
-1);
3909 last_sector
= bi
->bi_sector
+ (bi
->bi_size
>>9);
3911 bi
->bi_phys_segments
= 1; /* over-loaded to count active stripes */
3913 for (;logical_sector
< last_sector
; logical_sector
+= STRIPE_SECTORS
) {
3915 int disks
, data_disks
;
3920 disks
= conf
->raid_disks
;
3921 prepare_to_wait(&conf
->wait_for_overlap
, &w
, TASK_UNINTERRUPTIBLE
);
3922 if (unlikely(conf
->reshape_progress
!= MaxSector
)) {
3923 /* spinlock is needed as reshape_progress may be
3924 * 64bit on a 32bit platform, and so it might be
3925 * possible to see a half-updated value
3926 * Ofcourse reshape_progress could change after
3927 * the lock is dropped, so once we get a reference
3928 * to the stripe that we think it is, we will have
3931 spin_lock_irq(&conf
->device_lock
);
3932 if (mddev
->delta_disks
< 0
3933 ? logical_sector
< conf
->reshape_progress
3934 : logical_sector
>= conf
->reshape_progress
) {
3935 disks
= conf
->previous_raid_disks
;
3938 if (mddev
->delta_disks
< 0
3939 ? logical_sector
< conf
->reshape_safe
3940 : logical_sector
>= conf
->reshape_safe
) {
3941 spin_unlock_irq(&conf
->device_lock
);
3946 spin_unlock_irq(&conf
->device_lock
);
3948 data_disks
= disks
- conf
->max_degraded
;
3950 new_sector
= raid5_compute_sector(conf
, logical_sector
,
3953 pr_debug("raid5: make_request, sector %llu logical %llu\n",
3954 (unsigned long long)new_sector
,
3955 (unsigned long long)logical_sector
);
3957 sh
= get_active_stripe(conf
, new_sector
, previous
,
3958 (bi
->bi_rw
&RWA_MASK
), 0);
3960 if (unlikely(previous
)) {
3961 /* expansion might have moved on while waiting for a
3962 * stripe, so we must do the range check again.
3963 * Expansion could still move past after this
3964 * test, but as we are holding a reference to
3965 * 'sh', we know that if that happens,
3966 * STRIPE_EXPANDING will get set and the expansion
3967 * won't proceed until we finish with the stripe.
3970 spin_lock_irq(&conf
->device_lock
);
3971 if (mddev
->delta_disks
< 0
3972 ? logical_sector
>= conf
->reshape_progress
3973 : logical_sector
< conf
->reshape_progress
)
3974 /* mismatch, need to try again */
3976 spin_unlock_irq(&conf
->device_lock
);
3984 if (bio_data_dir(bi
) == WRITE
&&
3985 logical_sector
>= mddev
->suspend_lo
&&
3986 logical_sector
< mddev
->suspend_hi
) {
3988 /* As the suspend_* range is controlled by
3989 * userspace, we want an interruptible
3992 flush_signals(current
);
3993 prepare_to_wait(&conf
->wait_for_overlap
,
3994 &w
, TASK_INTERRUPTIBLE
);
3995 if (logical_sector
>= mddev
->suspend_lo
&&
3996 logical_sector
< mddev
->suspend_hi
)
4001 if (test_bit(STRIPE_EXPANDING
, &sh
->state
) ||
4002 !add_stripe_bio(sh
, bi
, dd_idx
, (bi
->bi_rw
&RW_MASK
))) {
4003 /* Stripe is busy expanding or
4004 * add failed due to overlap. Flush everything
4007 raid5_unplug_device(mddev
->queue
);
4012 finish_wait(&conf
->wait_for_overlap
, &w
);
4013 set_bit(STRIPE_HANDLE
, &sh
->state
);
4014 clear_bit(STRIPE_DELAYED
, &sh
->state
);
4015 if (mddev
->barrier
&&
4016 !test_and_set_bit(STRIPE_PREREAD_ACTIVE
, &sh
->state
))
4017 atomic_inc(&conf
->preread_active_stripes
);
4020 /* cannot get stripe for read-ahead, just give-up */
4021 clear_bit(BIO_UPTODATE
, &bi
->bi_flags
);
4022 finish_wait(&conf
->wait_for_overlap
, &w
);
4027 spin_lock_irq(&conf
->device_lock
);
4028 remaining
= raid5_dec_bi_phys_segments(bi
);
4029 spin_unlock_irq(&conf
->device_lock
);
4030 if (remaining
== 0) {
4033 md_write_end(mddev
);
4038 if (mddev
->barrier
) {
4039 /* We need to wait for the stripes to all be handled.
4040 * So: wait for preread_active_stripes to drop to 0.
4042 wait_event(mddev
->thread
->wqueue
,
4043 atomic_read(&conf
->preread_active_stripes
) == 0);
4048 static sector_t
raid5_size(mddev_t
*mddev
, sector_t sectors
, int raid_disks
);
4050 static sector_t
reshape_request(mddev_t
*mddev
, sector_t sector_nr
, int *skipped
)
4052 /* reshaping is quite different to recovery/resync so it is
4053 * handled quite separately ... here.
4055 * On each call to sync_request, we gather one chunk worth of
4056 * destination stripes and flag them as expanding.
4057 * Then we find all the source stripes and request reads.
4058 * As the reads complete, handle_stripe will copy the data
4059 * into the destination stripe and release that stripe.
4061 raid5_conf_t
*conf
= mddev
->private;
4062 struct stripe_head
*sh
;
4063 sector_t first_sector
, last_sector
;
4064 int raid_disks
= conf
->previous_raid_disks
;
4065 int data_disks
= raid_disks
- conf
->max_degraded
;
4066 int new_data_disks
= conf
->raid_disks
- conf
->max_degraded
;
4069 sector_t writepos
, readpos
, safepos
;
4070 sector_t stripe_addr
;
4071 int reshape_sectors
;
4072 struct list_head stripes
;
4074 if (sector_nr
== 0) {
4075 /* If restarting in the middle, skip the initial sectors */
4076 if (mddev
->delta_disks
< 0 &&
4077 conf
->reshape_progress
< raid5_size(mddev
, 0, 0)) {
4078 sector_nr
= raid5_size(mddev
, 0, 0)
4079 - conf
->reshape_progress
;
4080 } else if (mddev
->delta_disks
>= 0 &&
4081 conf
->reshape_progress
> 0)
4082 sector_nr
= conf
->reshape_progress
;
4083 sector_div(sector_nr
, new_data_disks
);
4085 mddev
->curr_resync_completed
= sector_nr
;
4086 sysfs_notify(&mddev
->kobj
, NULL
, "sync_completed");
4092 /* We need to process a full chunk at a time.
4093 * If old and new chunk sizes differ, we need to process the
4096 if (mddev
->new_chunk_sectors
> mddev
->chunk_sectors
)
4097 reshape_sectors
= mddev
->new_chunk_sectors
;
4099 reshape_sectors
= mddev
->chunk_sectors
;
    /* we update the metadata when there is more than 3Meg
     * in the block range (that is rather arbitrary, should
     * probably be time based) or when the data about to be
     * copied would over-write the source of the data at
     * the front of the range.
     * i.e. one new_stripe along from reshape_progress new_maps
     * to after where reshape_safe old_maps to
     */
    writepos = conf->reshape_progress;
    sector_div(writepos, new_data_disks);
    readpos = conf->reshape_progress;
    sector_div(readpos, data_disks);
    safepos = conf->reshape_safe;
    sector_div(safepos, data_disks);
    if (mddev->delta_disks < 0) {
        writepos -= min_t(sector_t, reshape_sectors, writepos);
        readpos += reshape_sectors;
        safepos += reshape_sectors;
    } else {
        writepos += reshape_sectors;
        readpos -= min_t(sector_t, reshape_sectors, readpos);
        safepos -= min_t(sector_t, reshape_sectors, safepos);
    }
    /* 'writepos' is the most advanced device address we might write.
     * 'readpos' is the least advanced device address we might read.
     * 'safepos' is the least address recorded in the metadata as having
     * been reshaped.
     * If 'readpos' is behind 'writepos', then there is no way that we can
     * ensure safety in the face of a crash - that must be done by userspace
     * making a backup of the data.  So in that case there is no particular
     * rush to update metadata.
     * Otherwise if 'safepos' is behind 'writepos', then we really need to
     * update the metadata to advance 'safepos' to match 'readpos' so that
     * we can be safe in the event of a crash.
     * So we insist on updating metadata if safepos is behind writepos and
     * readpos is beyond writepos.
     * In any case, update the metadata every 10 seconds.
     * Maybe that number should be configurable, but I'm not sure it is
     * worth it.... maybe it could be a multiple of safemode_delay???
     */
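    /* Worked example (illustrative numbers only): growing from 4 to 5 data
     * disks with reshape_sectors = 128.  If reshape_progress = 10240 and
     * reshape_safe = 8192, then writepos = 10240/5 + 128 = 2176,
     * readpos = 10240/4 - 128 = 2432 and safepos = 8192/4 - 128 = 1920.
     * safepos is behind writepos while readpos is beyond it, so the
     * superblock must be written before this chunk can be copied.
     */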
    if ((mddev->delta_disks < 0
         ? (safepos > writepos && readpos < writepos)
         : (safepos < writepos && readpos > writepos)) ||
        time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) {
        /* Cannot proceed until we've updated the superblock... */
        wait_event(conf->wait_for_overlap,
                   atomic_read(&conf->reshape_stripes) == 0);
        mddev->reshape_position = conf->reshape_progress;
        mddev->curr_resync_completed = mddev->curr_resync;
        conf->reshape_checkpoint = jiffies;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
        wait_event(mddev->sb_wait, mddev->flags == 0 ||
                   kthread_should_stop());
        spin_lock_irq(&conf->device_lock);
        conf->reshape_safe = mddev->reshape_position;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    }
    if (mddev->delta_disks < 0) {
        BUG_ON(conf->reshape_progress == 0);
        stripe_addr = writepos;
        BUG_ON((mddev->dev_sectors &
                ~((sector_t)reshape_sectors - 1))
               - reshape_sectors - stripe_addr
               != sector_nr);
    } else {
        BUG_ON(writepos != sector_nr + reshape_sectors);
        stripe_addr = sector_nr;
    }
    INIT_LIST_HEAD(&stripes);
    for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) {
        int skipped_disk = 0;
        sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1);
        set_bit(STRIPE_EXPANDING, &sh->state);
        atomic_inc(&conf->reshape_stripes);
        /* If any of this stripe is beyond the end of the old
         * array, then we need to zero those blocks
         */
        for (j = sh->disks; j--; ) {
            if (j == sh->pd_idx)
                continue;
            if (conf->level == 6 &&
                j == sh->qd_idx)
                continue;
            s = compute_blocknr(sh, j, 0);
            if (s < raid5_size(mddev, 0, 0)) {
                skipped_disk = 1;
                continue;
            }
            memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE);
            set_bit(R5_Expanded, &sh->dev[j].flags);
            set_bit(R5_UPTODATE, &sh->dev[j].flags);
        }
        if (!skipped_disk) {
            set_bit(STRIPE_EXPAND_READY, &sh->state);
            set_bit(STRIPE_HANDLE, &sh->state);
        }
        list_add(&sh->lru, &stripes);
    }
    spin_lock_irq(&conf->device_lock);
    if (mddev->delta_disks < 0)
        conf->reshape_progress -= reshape_sectors * new_data_disks;
    else
        conf->reshape_progress += reshape_sectors * new_data_disks;
    spin_unlock_irq(&conf->device_lock);
    /* Ok, those stripes are ready. We can start scheduling
     * reads on the source stripes.
     * The source stripes are determined by mapping the first and last
     * block on the destination stripes.
     */
    first_sector =
        raid5_compute_sector(conf, stripe_addr*(new_data_disks),
                             1, &dd_idx, NULL);
    last_sector =
        raid5_compute_sector(conf, ((stripe_addr+reshape_sectors)
                                    * new_data_disks - 1),
                             1, &dd_idx, NULL);
    if (last_sector >= mddev->dev_sectors)
        last_sector = mddev->dev_sectors - 1;
    while (first_sector <= last_sector) {
        sh = get_active_stripe(conf, first_sector, 1, 0, 1);
        set_bit(STRIPE_EXPAND_SOURCE, &sh->state);
        set_bit(STRIPE_HANDLE, &sh->state);
        release_stripe(sh);
        first_sector += STRIPE_SECTORS;
    }
    /* Now that the sources are clearly marked, we can release
     * the destination stripes
     */
    while (!list_empty(&stripes)) {
        sh = list_entry(stripes.next, struct stripe_head, lru);
        list_del_init(&sh->lru);
        release_stripe(sh);
    }
    /* If this takes us to the resync_max point where we have to pause,
     * then we need to write out the superblock.
     */
    sector_nr += reshape_sectors;
    if ((sector_nr - mddev->curr_resync_completed) * 2
        >= mddev->resync_max - mddev->curr_resync_completed) {
        /* Cannot proceed until we've updated the superblock... */
        wait_event(conf->wait_for_overlap,
                   atomic_read(&conf->reshape_stripes) == 0);
        mddev->reshape_position = conf->reshape_progress;
        mddev->curr_resync_completed = mddev->curr_resync + reshape_sectors;
        conf->reshape_checkpoint = jiffies;
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
        wait_event(mddev->sb_wait,
                   !test_bit(MD_CHANGE_DEVS, &mddev->flags)
                   || kthread_should_stop());
        spin_lock_irq(&conf->device_lock);
        conf->reshape_safe = mddev->reshape_position;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");
    }
    return reshape_sectors;
}
/* FIXME go_faster isn't used */
static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr,
                                    int *skipped, int go_faster)
{
    raid5_conf_t *conf = mddev->private;
    struct stripe_head *sh;
    sector_t max_sector = mddev->dev_sectors;
    int sync_blocks;
    int still_degraded = 0;
    int i;

    if (sector_nr >= max_sector) {
        /* just being told to finish up .. nothing much to do */
        unplug_slaves(mddev);

        if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
            end_reshape(conf);
            return 0;
        }

        if (mddev->curr_resync < max_sector) /* aborted */
            bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
                            &sync_blocks, 1);
        else /* completed sync */
            conf->fullsync = 0;
        bitmap_close_sync(mddev->bitmap);

        return 0;
    }

    /* Allow raid5_quiesce to complete */
    wait_event(conf->wait_for_overlap, conf->quiesce != 2);

    if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
        return reshape_request(mddev, sector_nr, skipped);
    /* No need to check resync_max as we never do more than one
     * stripe, and as resync_max will always be on a chunk boundary,
     * if the check in md_do_sync didn't fire, there is no chance
     * of overstepping resync_max here
     */

    /* if there are too many failed drives and we are trying
     * to resync, then assert that we are finished, because there is
     * nothing we can do.
     */
    if (mddev->degraded >= conf->max_degraded &&
        test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
        sector_t rv = mddev->dev_sectors - sector_nr;
        *skipped = 1;
        return rv;
    }
    if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
        !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
        !conf->fullsync && sync_blocks >= STRIPE_SECTORS) {
        /* we can skip this block, and probably more */
        sync_blocks /= STRIPE_SECTORS;
        *skipped = 1;
        return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */
    }

    bitmap_cond_end_sync(mddev->bitmap, sector_nr);
    sh = get_active_stripe(conf, sector_nr, 0, 1, 0);
    if (sh == NULL) {
        sh = get_active_stripe(conf, sector_nr, 0, 0, 0);
        /* make sure we don't swamp the stripe cache if someone else
         * is trying to get access
         */
        schedule_timeout_uninterruptible(1);
    }
    /* Need to check if array will still be degraded after recovery/resync
     * We don't need to check the 'failed' flag as when that gets set,
     * recovery aborts.
     */
    for (i = 0; i < conf->raid_disks; i++)
        if (conf->disks[i].rdev == NULL)
            still_degraded = 1;

    bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded);

    spin_lock(&sh->lock);
    set_bit(STRIPE_SYNCING, &sh->state);
    clear_bit(STRIPE_INSYNC, &sh->state);
    spin_unlock(&sh->lock);

    return STRIPE_SECTORS;
}
static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
{
    /* We may not be able to submit a whole bio at once as there
     * may not be enough stripe_heads available.
     * We cannot pre-allocate enough stripe_heads as we may need
     * more than exist in the cache (if we allow ever large chunks).
     * So we do one stripe head at a time and record in
     * ->bi_hw_segments how many have been done.
     *
     * We *know* that this entire raid_bio is in one chunk, so
     * it will be only one 'dd_idx' and only need one call to raid5_compute_sector.
     */
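    /* Resume protocol, as visible in the loop below: the count of stripes
     * already handled lives in the bio's hw-segment counter; if
     * get_active_stripe() or add_stripe_bio() fails part way through, the
     * current count is saved with raid5_set_bi_hw_segments() and the bio is
     * parked in conf->retry_read_aligned so raid5d can retry it later from
     * the same point.
     */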
    struct stripe_head *sh;
    int dd_idx;
    sector_t sector, logical_sector, last_sector;
    int scnt = 0;
    int remaining;
    int handled = 0;

    logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
    sector = raid5_compute_sector(conf, logical_sector,
                                  0, &dd_idx, NULL);
    last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);

    for (; logical_sector < last_sector;
         logical_sector += STRIPE_SECTORS,
         sector += STRIPE_SECTORS,
         scnt++) {

        if (scnt < raid5_bi_hw_segments(raid_bio))
            /* already done this stripe */
            continue;

        sh = get_active_stripe(conf, sector, 0, 1, 0);

        if (!sh) {
            /* failed to get a stripe - must wait */
            raid5_set_bi_hw_segments(raid_bio, scnt);
            conf->retry_read_aligned = raid_bio;
            return handled;
        }

        set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
        if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
            release_stripe(sh);
            raid5_set_bi_hw_segments(raid_bio, scnt);
            conf->retry_read_aligned = raid_bio;
            return handled;
        }

        handle_stripe(sh);
        release_stripe(sh);
        handled++;
    }
    spin_lock_irq(&conf->device_lock);
    remaining = raid5_dec_bi_phys_segments(raid_bio);
    spin_unlock_irq(&conf->device_lock);
    if (remaining == 0)
        bio_endio(raid_bio, 0);
    if (atomic_dec_and_test(&conf->active_aligned_reads))
        wake_up(&conf->wait_for_stripe);
    return handled;
}
/*
 * This is our raid5 kernel thread.
 *
 * We scan the hash table for stripes which can be handled now.
 * During the scan, completed stripes are saved for us by the interrupt
 * handler, so that they will not have to wait for our next wakeup.
 */
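/* The loop body below (run under conf->device_lock) does three things per
 * pass: flush any batched bitmap updates once seq_flush has moved past
 * seq_write, re-issue aligned reads that previously failed to get a
 * stripe_head, and pop the next stripe from __get_priority_stripe() for
 * handling.
 */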
static void raid5d(mddev_t *mddev)
{
    struct stripe_head *sh;
    raid5_conf_t *conf = mddev->private;
    int handled = 0;

    pr_debug("+++ raid5d active\n");

    md_check_recovery(mddev);

    spin_lock_irq(&conf->device_lock);
    while (1) {
        struct bio *bio;

        if (conf->seq_flush != conf->seq_write) {
            int seq = conf->seq_flush;
            spin_unlock_irq(&conf->device_lock);
            bitmap_unplug(mddev->bitmap);
            spin_lock_irq(&conf->device_lock);
            conf->seq_write = seq;
            activate_bit_delay(conf);
        }

        while ((bio = remove_bio_from_retry(conf))) {
            int ok;
            spin_unlock_irq(&conf->device_lock);
            ok = retry_aligned_read(conf, bio);
            spin_lock_irq(&conf->device_lock);
            if (!ok)
                break;
            handled++;
        }

        sh = __get_priority_stripe(conf);
        if (!sh)
            break;
        spin_unlock_irq(&conf->device_lock);

        handled++;
        handle_stripe(sh);
        release_stripe(sh);

        spin_lock_irq(&conf->device_lock);
    }
    pr_debug("%d stripes handled\n", handled);

    spin_unlock_irq(&conf->device_lock);

    async_tx_issue_pending_all();
    unplug_slaves(mddev);

    pr_debug("--- raid5d inactive\n");
}
static ssize_t
raid5_show_stripe_cache_size(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    return sprintf(page, "%d\n", conf->max_nr_stripes);
}
static ssize_t
raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len)
{
    raid5_conf_t *conf = mddev->private;
    unsigned long new;
    int err;

    if (len >= PAGE_SIZE)
        return -EINVAL;
    if (strict_strtoul(page, 10, &new))
        return -EINVAL;
    if (new <= 16 || new > 32768)
        return -EINVAL;
    while (new < conf->max_nr_stripes) {
        if (drop_one_stripe(conf))
            conf->max_nr_stripes--;
        else
            break;
    }
    err = md_allow_write(mddev);
    if (err)
        return err;
    while (new > conf->max_nr_stripes) {
        if (grow_one_stripe(conf))
            conf->max_nr_stripes++;
        else
            break;
    }
    return len;
}
static struct md_sysfs_entry
raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
                                raid5_show_stripe_cache_size,
                                raid5_store_stripe_cache_size);
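/* Usage sketch (device name illustrative):
 *   cat  /sys/block/md0/md/stripe_cache_size
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 * The store method above rejects values of 16 or less and values above
 * 32768, and grows or shrinks the cache one stripe_head at a time.
 */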
static ssize_t
raid5_show_preread_threshold(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    return sprintf(page, "%d\n", conf->bypass_threshold);
}

static ssize_t
raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len)
{
    raid5_conf_t *conf = mddev->private;
    unsigned long new;

    if (len >= PAGE_SIZE)
        return -EINVAL;
    if (strict_strtoul(page, 10, &new))
        return -EINVAL;
    if (new > conf->max_nr_stripes)
        return -EINVAL;
    conf->bypass_threshold = new;
    return len;
}
static struct md_sysfs_entry
raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold,
                                        S_IRUGO | S_IWUSR,
                                        raid5_show_preread_threshold,
                                        raid5_store_preread_threshold);
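/* Usage sketch (device name illustrative):
 *   echo 2 > /sys/block/md0/md/preread_bypass_threshold
 * The store method only checks that the value does not exceed
 * stripe_cache_size (conf->max_nr_stripes); it tunes how readily full
 * stripe writes may bypass stripes that are waiting for pre-reading.
 */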
static ssize_t
stripe_cache_active_show(mddev_t *mddev, char *page)
{
    raid5_conf_t *conf = mddev->private;
    return sprintf(page, "%d\n", atomic_read(&conf->active_stripes));
}

static struct md_sysfs_entry
raid5_stripecache_active = __ATTR_RO(stripe_cache_active);

static struct attribute *raid5_attrs[] = {
    &raid5_stripecache_size.attr,
    &raid5_stripecache_active.attr,
    &raid5_preread_bypass_threshold.attr,
    NULL,
};
static struct attribute_group raid5_attrs_group = {
    .attrs = raid5_attrs,
};
static sector_t
raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
    raid5_conf_t *conf = mddev->private;

    if (!sectors)
        sectors = mddev->dev_sectors;
    if (!raid_disks)
        /* size is defined by the smallest of previous and new size */
        raid_disks = min(conf->raid_disks, conf->previous_raid_disks);

    sectors &= ~((sector_t)mddev->chunk_sectors - 1);
    sectors &= ~((sector_t)mddev->new_chunk_sectors - 1);
    return sectors * (raid_disks - conf->max_degraded);
}
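/* For illustration (figures assumed): a 6-device RAID6 (max_degraded = 2)
 * whose smallest member provides 1953525168 sectors with 1024-sector chunks
 * first rounds down to 1953524736 sectors per device, then exports
 * 1953524736 * (6 - 2) = 7814098944 sectors (~3.6 TiB) as the array size.
 */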
static void raid5_free_percpu(raid5_conf_t *conf)
{
    struct raid5_percpu *percpu;
    unsigned long cpu;

    for_each_possible_cpu(cpu) {
        percpu = per_cpu_ptr(conf->percpu, cpu);
        safe_put_page(percpu->spare_page);
        kfree(percpu->scribble);
    }
#ifdef CONFIG_HOTPLUG_CPU
    unregister_cpu_notifier(&conf->cpu_notify);
#endif
    free_percpu(conf->percpu);
}

static void free_conf(raid5_conf_t *conf)
{
    shrink_stripes(conf);
    raid5_free_percpu(conf);
    kfree(conf->stripe_hashtbl);
}
#ifdef CONFIG_HOTPLUG_CPU
static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
                              void *hcpu)
{
    raid5_conf_t *conf = container_of(nfb, raid5_conf_t, cpu_notify);
    long cpu = (long)hcpu;
    struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu);

    switch (action) {
    case CPU_UP_PREPARE:
    case CPU_UP_PREPARE_FROZEN:
        if (conf->level == 6 && !percpu->spare_page)
            percpu->spare_page = alloc_page(GFP_KERNEL);
        if (!percpu->scribble)
            percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);

        if (!percpu->scribble ||
            (conf->level == 6 && !percpu->spare_page)) {
            safe_put_page(percpu->spare_page);
            kfree(percpu->scribble);
            pr_err("%s: failed memory allocation for cpu%ld\n",
                   __func__, cpu);
        }
        break;
    case CPU_DEAD:
    case CPU_DEAD_FROZEN:
        safe_put_page(percpu->spare_page);
        kfree(percpu->scribble);
        percpu->spare_page = NULL;
        percpu->scribble = NULL;
        break;
    }
static int raid5_alloc_percpu(raid5_conf_t *conf)
{
    unsigned long cpu;
    struct page *spare_page;
    struct raid5_percpu __percpu *allcpus;
    void *scribble;
    int err = 0;

    allcpus = alloc_percpu(struct raid5_percpu);
    if (!allcpus)
        return -ENOMEM;
    conf->percpu = allcpus;

    for_each_present_cpu(cpu) {
        if (conf->level == 6) {
            spare_page = alloc_page(GFP_KERNEL);
            per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
        }
        scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
        per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
    }
#ifdef CONFIG_HOTPLUG_CPU
    conf->cpu_notify.notifier_call = raid456_cpu_notify;
    conf->cpu_notify.priority = 0;
    err = register_cpu_notifier(&conf->cpu_notify);
#endif

    return err;
}
static raid5_conf_t *setup_conf(mddev_t *mddev)
{
    raid5_conf_t *conf;
    int raid_disk, memory, max_disks;
    mdk_rdev_t *rdev;
    struct disk_info *disk;

    if (mddev->new_level != 5
        && mddev->new_level != 4
        && mddev->new_level != 6) {
        printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n",
               mdname(mddev), mddev->new_level);
        return ERR_PTR(-EIO);
    }
    if ((mddev->new_level == 5
         && !algorithm_valid_raid5(mddev->new_layout)) ||
        (mddev->new_level == 6
         && !algorithm_valid_raid6(mddev->new_layout))) {
        printk(KERN_ERR "raid5: %s: layout %d not supported\n",
               mdname(mddev), mddev->new_layout);
        return ERR_PTR(-EIO);
    }
    if (mddev->new_level == 6 && mddev->raid_disks < 4) {
        printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n",
               mdname(mddev), mddev->raid_disks);
        return ERR_PTR(-EINVAL);
    }

    if (!mddev->new_chunk_sectors ||
        (mddev->new_chunk_sectors << 9) % PAGE_SIZE ||
        !is_power_of_2(mddev->new_chunk_sectors)) {
        printk(KERN_ERR "raid5: invalid chunk size %d for %s\n",
               mddev->new_chunk_sectors << 9, mdname(mddev));
        return ERR_PTR(-EINVAL);
    }
    conf = kzalloc(sizeof(raid5_conf_t), GFP_KERNEL);
    if (conf == NULL)
        goto abort;
    spin_lock_init(&conf->device_lock);
    init_waitqueue_head(&conf->wait_for_stripe);
    init_waitqueue_head(&conf->wait_for_overlap);
    INIT_LIST_HEAD(&conf->handle_list);
    INIT_LIST_HEAD(&conf->hold_list);
    INIT_LIST_HEAD(&conf->delayed_list);
    INIT_LIST_HEAD(&conf->bitmap_list);
    INIT_LIST_HEAD(&conf->inactive_list);
    atomic_set(&conf->active_stripes, 0);
    atomic_set(&conf->preread_active_stripes, 0);
    atomic_set(&conf->active_aligned_reads, 0);
    conf->bypass_threshold = BYPASS_THRESHOLD;

    conf->raid_disks = mddev->raid_disks;
    if (mddev->reshape_position == MaxSector)
        conf->previous_raid_disks = mddev->raid_disks;
    else
        conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks;
    max_disks = max(conf->raid_disks, conf->previous_raid_disks);
    conf->scribble_len = scribble_len(max_disks);

    conf->disks = kzalloc(max_disks * sizeof(struct disk_info),
                          GFP_KERNEL);
    if (!conf->disks)
        goto abort;

    conf->mddev = mddev;

    if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL)
        goto abort;

    conf->level = mddev->new_level;
    if (raid5_alloc_percpu(conf) != 0)
        goto abort;

    pr_debug("raid5: run(%s) called.\n", mdname(mddev));
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        raid_disk = rdev->raid_disk;
        if (raid_disk >= max_disks
            || raid_disk < 0)
            continue;
        disk = conf->disks + raid_disk;

        disk->rdev = rdev;

        if (test_bit(In_sync, &rdev->flags)) {
            char b[BDEVNAME_SIZE];
            printk(KERN_INFO "raid5: device %s operational as raid"
                   " disk %d\n", bdevname(rdev->bdev, b),
                   raid_disk);
        } else
            /* Cannot rely on bitmap to complete recovery */
            conf->fullsync = 1;
    }
    conf->chunk_sectors = mddev->new_chunk_sectors;
    conf->level = mddev->new_level;
    if (conf->level == 6)
        conf->max_degraded = 2;
    else
        conf->max_degraded = 1;
    conf->algorithm = mddev->new_layout;
    conf->max_nr_stripes = NR_STRIPES;
    conf->reshape_progress = mddev->reshape_position;
    if (conf->reshape_progress != MaxSector) {
        conf->prev_chunk_sectors = mddev->chunk_sectors;
        conf->prev_algo = mddev->layout;
    }

    memory = conf->max_nr_stripes * (sizeof(struct stripe_head) +
             max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024;
    if (grow_stripes(conf, conf->max_nr_stripes)) {
        printk(KERN_ERR
               "raid5: couldn't allocate %dkB for buffers\n", memory);
        goto abort;
    } else
        printk(KERN_INFO "raid5: allocated %dkB for %s\n",
               memory, mdname(mddev));

    conf->thread = md_register_thread(raid5d, mddev, NULL);
    if (!conf->thread) {
        printk(KERN_ERR
               "raid5: couldn't allocate thread for %s\n",
               mdname(mddev));
        goto abort;
    }

    return conf;

 abort:
    if (conf) {
        free_conf(conf);
        return ERR_PTR(-EIO);
    } else
        return ERR_PTR(-ENOMEM);
}
static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded)
{
    switch (algo) {
    case ALGORITHM_PARITY_0:
        if (raid_disk < max_degraded)
            return 1;
        break;
    case ALGORITHM_PARITY_N:
        if (raid_disk >= raid_disks - max_degraded)
            return 1;
        break;
    case ALGORITHM_PARITY_0_6:
        if (raid_disk == 0 ||
            raid_disk == raid_disks - 1)
            return 1;
        break;
    case ALGORITHM_LEFT_ASYMMETRIC_6:
    case ALGORITHM_RIGHT_ASYMMETRIC_6:
    case ALGORITHM_LEFT_SYMMETRIC_6:
    case ALGORITHM_RIGHT_SYMMETRIC_6:
        if (raid_disk == raid_disks - 1)
            return 1;
        break;
    }
    return 0;
}
static int run(mddev_t *mddev)
{
    raid5_conf_t *conf;
    int working_disks = 0, chunk_size;
    int dirty_parity_disks = 0;
    mdk_rdev_t *rdev;
    sector_t reshape_offset = 0;

    if (mddev->recovery_cp != MaxSector)
        printk(KERN_NOTICE "raid5: %s is not clean"
               " -- starting background reconstruction\n",
               mdname(mddev));
    if (mddev->reshape_position != MaxSector) {
        /* Check that we can continue the reshape.
         * Currently only disks can change, it must
         * increase, and we must be past the point where
         * a stripe over-writes itself
         */
        sector_t here_new, here_old;
        int old_disks;
        int max_degraded = (mddev->level == 6 ? 2 : 1);

        if (mddev->new_level != mddev->level) {
            printk(KERN_ERR "raid5: %s: unsupported reshape "
                   "required - aborting.\n",
                   mdname(mddev));
            return -EINVAL;
        }
        old_disks = mddev->raid_disks - mddev->delta_disks;
        /* reshape_position must be on a new-stripe boundary, and one
         * further up in new geometry must map after here in old
         * geometry.
         */
        here_new = mddev->reshape_position;
        if (sector_div(here_new, mddev->new_chunk_sectors *
                       (mddev->raid_disks - max_degraded))) {
            printk(KERN_ERR "raid5: reshape_position not "
                   "on a stripe boundary\n");
            return -EINVAL;
        }
        reshape_offset = here_new * mddev->new_chunk_sectors;
        /* here_new is the stripe we will write to */
        here_old = mddev->reshape_position;
        sector_div(here_old, mddev->chunk_sectors *
                   (old_disks-max_degraded));
        /* here_old is the first stripe that we might need to read
         * from */
        if (mddev->delta_disks == 0) {
            /* We cannot be sure it is safe to start an in-place
             * reshape.  It is only safe if user-space is monitoring
             * and taking constant backups.
             * mdadm always starts a situation like this in
             * readonly mode so it can take control before
             * allowing any writes.  So just check for that.
             */
            if ((here_new * mddev->new_chunk_sectors !=
                 here_old * mddev->chunk_sectors) ||
                mddev->ro == 0) {
                printk(KERN_ERR "raid5: in-place reshape must be started"
                       " in read-only mode - aborting\n");
                return -EINVAL;
            }
        } else if (mddev->delta_disks < 0
                   ? (here_new * mddev->new_chunk_sectors <=
                      here_old * mddev->chunk_sectors)
                   : (here_new * mddev->new_chunk_sectors >=
                      here_old * mddev->chunk_sectors)) {
            /* Reading from the same stripe as writing to - bad */
            printk(KERN_ERR "raid5: reshape_position too early for "
                   "auto-recovery - aborting.\n");
            return -EINVAL;
        }
        printk(KERN_INFO "raid5: reshape will continue\n");
        /* OK, we should be able to continue; */
    } else {
        BUG_ON(mddev->level != mddev->new_level);
        BUG_ON(mddev->layout != mddev->new_layout);
        BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors);
        BUG_ON(mddev->delta_disks != 0);
    }
    if (mddev->private == NULL)
        conf = setup_conf(mddev);
    else
        conf = mddev->private;

    if (IS_ERR(conf))
        return PTR_ERR(conf);

    mddev->thread = conf->thread;
    conf->thread = NULL;
    mddev->private = conf;
    /*
     * 0 for a fully functional array, 1 or 2 for a degraded array.
     */
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        if (rdev->raid_disk < 0)
            continue;
        if (test_bit(In_sync, &rdev->flags))
            working_disks++;
        /* This disc is not fully in-sync.  However if it
         * just stored parity (beyond the recovery_offset),
         * then we don't need to be concerned about the
         * array being dirty.
         * When reshape goes 'backwards', we never have
         * partially completed devices, so we only need
         * to worry about reshape going forwards.
         */
        /* Hack because v0.91 doesn't store recovery_offset properly. */
        if (mddev->major_version == 0 &&
            mddev->minor_version > 90)
            rdev->recovery_offset = reshape_offset;
5000 printk("%d: w=%d pa=%d pr=%d m=%d a=%d r=%d op1=%d op2=%d\n",
5001 rdev
->raid_disk
, working_disks
, conf
->prev_algo
,
5002 conf
->previous_raid_disks
, conf
->max_degraded
,
5003 conf
->algorithm
, conf
->raid_disks
,
5004 only_parity(rdev
->raid_disk
,
5006 conf
->previous_raid_disks
,
5007 conf
->max_degraded
),
5008 only_parity(rdev
->raid_disk
,
5011 conf
->max_degraded
));
5012 if (rdev
->recovery_offset
< reshape_offset
) {
5013 /* We need to check old and new layout */
5014 if (!only_parity(rdev
->raid_disk
,
5017 conf
->max_degraded
))
5020 if (!only_parity(rdev
->raid_disk
,
5022 conf
->previous_raid_disks
,
5023 conf
->max_degraded
))
5025 dirty_parity_disks
++;
    mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks)
                       - working_disks);

    if (mddev->degraded > conf->max_degraded) {
        printk(KERN_ERR "raid5: not enough operational devices for %s"
               " (%d/%d failed)\n",
               mdname(mddev), mddev->degraded, conf->raid_disks);
        goto abort;
    }
    /* device size must be a multiple of chunk size */
    mddev->dev_sectors &= ~(mddev->chunk_sectors - 1);
    mddev->resync_max_sectors = mddev->dev_sectors;

    if (mddev->degraded > dirty_parity_disks &&
        mddev->recovery_cp != MaxSector) {
        if (mddev->ok_start_degraded)
            printk(KERN_WARNING
                   "raid5: starting dirty degraded array: %s"
                   "- data corruption possible.\n",
                   mdname(mddev));
        else {
            printk(KERN_ERR
                   "raid5: cannot start dirty degraded array for %s\n",
                   mdname(mddev));
            goto abort;
        }
    }
    if (mddev->degraded == 0)
        printk("raid5: raid level %d set %s active with %d out of %d"
               " devices, algorithm %d\n", conf->level, mdname(mddev),
               mddev->raid_disks-mddev->degraded, mddev->raid_disks,
               mddev->new_layout);
    else
        printk(KERN_ALERT "raid5: raid level %d set %s active with %d"
               " out of %d devices, algorithm %d\n", conf->level,
               mdname(mddev), mddev->raid_disks - mddev->degraded,
               mddev->raid_disks, mddev->new_layout);
    print_raid5_conf(conf);

    if (conf->reshape_progress != MaxSector) {
        printk("...ok start reshape thread\n");
        conf->reshape_safe = conf->reshape_progress;
        atomic_set(&conf->reshape_stripes, 0);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
                                                "reshape");
    }
    /* read-ahead size must cover two whole stripes, which is
     * 2 * (number of data disks) * chunksize
     */
    {
        int data_disks = conf->previous_raid_disks - conf->max_degraded;
        int stripe = data_disks *
            ((mddev->chunk_sectors << 9) / PAGE_SIZE);
        if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
            mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
    }
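    /* For example (figures assumed): with 4 data disks, 128 KiB chunks and
     * 4 KiB pages, stripe = 4 * (131072 / 4096) = 128 pages, so ra_pages is
     * raised to at least 256 pages, i.e. 1 MiB of read-ahead.
     */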
    /* Ok, everything is just fine now */
    if (mddev->to_remove == &raid5_attrs_group)
        mddev->to_remove = NULL;
    else if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group))
        printk(KERN_WARNING
               "raid5: failed to create sysfs attributes for %s\n",
               mdname(mddev));

    mddev->queue->queue_lock = &conf->device_lock;

    mddev->queue->unplug_fn = raid5_unplug_device;
    mddev->queue->backing_dev_info.congested_data = mddev;
    mddev->queue->backing_dev_info.congested_fn = raid5_congested;

    md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
    blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec);
    chunk_size = mddev->chunk_sectors << 9;
    blk_queue_io_min(mddev->queue, chunk_size);
    blk_queue_io_opt(mddev->queue, chunk_size *
                     (conf->raid_disks - conf->max_degraded));

    list_for_each_entry(rdev, &mddev->disks, same_set)
        disk_stack_limits(mddev->gendisk, rdev->bdev,
                          rdev->data_offset << 9);

    return 0;
abort:
    md_unregister_thread(mddev->thread);
    mddev->thread = NULL;
    if (conf)
        print_raid5_conf(conf);
    mddev->private = NULL;
    printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev));
    return -EIO;
}
static int stop(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;

    md_unregister_thread(mddev->thread);
    mddev->thread = NULL;
    mddev->queue->backing_dev_info.congested_fn = NULL;
    blk_sync_queue(mddev->queue); /* the unplug fn references 'conf' */
    free_conf(conf);
    mddev->private = NULL;
    mddev->to_remove = &raid5_attrs_group;
    return 0;
}
static void print_sh(struct seq_file *seq, struct stripe_head *sh)
{
    int i;

    seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n",
               (unsigned long long)sh->sector, sh->pd_idx, sh->state);
    seq_printf(seq, "sh %llu, count %d.\n",
               (unsigned long long)sh->sector, atomic_read(&sh->count));
    seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector);
    for (i = 0; i < sh->disks; i++) {
        seq_printf(seq, "(cache%d: %p %ld) ",
                   i, sh->dev[i].page, sh->dev[i].flags);
    }
    seq_printf(seq, "\n");
}
static void printall(struct seq_file *seq, raid5_conf_t *conf)
{
    struct stripe_head *sh;
    struct hlist_node *hn;
    int i;

    spin_lock_irq(&conf->device_lock);
    for (i = 0; i < NR_HASH; i++) {
        hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) {
            if (sh->raid_conf != conf)
                continue;
            print_sh(seq, sh);
        }
    }
    spin_unlock_irq(&conf->device_lock);
}
static void status(struct seq_file *seq, mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;
    int i;

    seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level,
               mddev->chunk_sectors / 2, mddev->layout);
    seq_printf(seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded);
    for (i = 0; i < conf->raid_disks; i++)
        seq_printf(seq, "%s",
                   conf->disks[i].rdev &&
                   test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_");
    seq_printf(seq, "]");
    seq_printf(seq, "\n");
    printall(seq, conf);
}
static void print_raid5_conf(raid5_conf_t *conf)
{
    int i;
    struct disk_info *tmp;

    printk("RAID5 conf printout:\n");
    if (!conf) {
        printk("(conf==NULL)\n");
        return;
    }
    printk(" --- rd:%d wd:%d\n", conf->raid_disks,
           conf->raid_disks - conf->mddev->degraded);

    for (i = 0; i < conf->raid_disks; i++) {
        char b[BDEVNAME_SIZE];
        tmp = conf->disks + i;
        if (tmp->rdev)
            printk(" disk %d, o:%d, dev:%s\n",
                   i, !test_bit(Faulty, &tmp->rdev->flags),
                   bdevname(tmp->rdev->bdev, b));
    }
}
static int raid5_spare_active(mddev_t *mddev)
{
    int i;
    raid5_conf_t *conf = mddev->private;
    struct disk_info *tmp;

    for (i = 0; i < conf->raid_disks; i++) {
        tmp = conf->disks + i;
        if (tmp->rdev
            && !test_bit(Faulty, &tmp->rdev->flags)
            && !test_and_set_bit(In_sync, &tmp->rdev->flags)) {
            unsigned long flags;
            spin_lock_irqsave(&conf->device_lock, flags);
            mddev->degraded--;
            spin_unlock_irqrestore(&conf->device_lock, flags);
        }
    }
    print_raid5_conf(conf);
    return 0;
}
static int raid5_remove_disk(mddev_t *mddev, int number)
{
    raid5_conf_t *conf = mddev->private;
    int err = 0;
    mdk_rdev_t *rdev;
    struct disk_info *p = conf->disks + number;

    print_raid5_conf(conf);
    rdev = p->rdev;
    if (rdev) {
        if (number >= conf->raid_disks &&
            conf->reshape_progress == MaxSector)
            clear_bit(In_sync, &rdev->flags);

        if (test_bit(In_sync, &rdev->flags) ||
            atomic_read(&rdev->nr_pending)) {
            err = -EBUSY;
            goto abort;
        }
        /* Only remove non-faulty devices if recovery
         * is not possible.
         */
        if (!test_bit(Faulty, &rdev->flags) &&
            mddev->degraded <= conf->max_degraded &&
            number < conf->raid_disks) {
            err = -EBUSY;
            goto abort;
        }
        p->rdev = NULL;
        synchronize_rcu();
        if (atomic_read(&rdev->nr_pending)) {
            /* lost the race, try later */
            err = -EBUSY;
            p->rdev = rdev;
        }
    }
abort:
    print_raid5_conf(conf);
    return err;
}
static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
{
    raid5_conf_t *conf = mddev->private;
    int err = -EEXIST;
    int disk;
    struct disk_info *p;
    int first = 0;
    int last = conf->raid_disks - 1;

    if (mddev->degraded > conf->max_degraded)
        /* no point adding a device */
        return -EINVAL;

    if (rdev->raid_disk >= 0)
        first = last = rdev->raid_disk;

    /*
     * find the disk ... but prefer rdev->saved_raid_disk
     * if possible.
     */
    if (rdev->saved_raid_disk >= 0 &&
        rdev->saved_raid_disk >= first &&
        conf->disks[rdev->saved_raid_disk].rdev == NULL)
        disk = rdev->saved_raid_disk;
    else
        disk = first;
    for ( ; disk <= last; disk++)
        if ((p = conf->disks + disk)->rdev == NULL) {
            clear_bit(In_sync, &rdev->flags);
            rdev->raid_disk = disk;
            err = 0;
            if (rdev->saved_raid_disk != disk)
                conf->fullsync = 1;
            rcu_assign_pointer(p->rdev, rdev);
            break;
        }
    print_raid5_conf(conf);
    return err;
}
static int raid5_resize(mddev_t *mddev, sector_t sectors)
{
    /* no resync is happening, and there is enough space
     * on all devices, so we can resize.
     * We need to make sure resync covers any new space.
     * If the array is shrinking we should possibly wait until
     * any io in the removed space completes, but it hardly seems
     * worth it.
     */
    sectors &= ~((sector_t)mddev->chunk_sectors - 1);
    md_set_array_sectors(mddev, raid5_size(mddev, sectors,
                                           mddev->raid_disks));
    if (mddev->array_sectors >
        raid5_size(mddev, sectors, mddev->raid_disks))
        return -EINVAL;
    set_capacity(mddev->gendisk, mddev->array_sectors);
    revalidate_disk(mddev->gendisk);
    if (sectors > mddev->dev_sectors && mddev->recovery_cp == MaxSector) {
        mddev->recovery_cp = mddev->dev_sectors;
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
    }
    mddev->dev_sectors = sectors;
    mddev->resync_max_sectors = sectors;
    return 0;
}
static int check_stripe_cache(mddev_t *mddev)
{
    /* Can only proceed if there are plenty of stripe_heads.
     * We need a minimum of one full stripe, and for sensible progress
     * it is best to have about 4 times that.
     * If we require 4 times, then the default 256 4K stripe_heads will
     * allow for chunk sizes up to 256K, which is probably OK.
     * If the chunk size is greater, user-space should request more
     * stripe_heads first.
     */
    raid5_conf_t *conf = mddev->private;
    if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4
        > conf->max_nr_stripes ||
        ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4
        > conf->max_nr_stripes) {
        printk(KERN_WARNING "raid5: reshape: not enough stripes.  Needed %lu\n",
               ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9)
                / STRIPE_SIZE) * 4);
        return 0;
    }
    return 1;
}
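/* Worked example (PAGE_SIZE of 4K assumed): reshaping to 512K chunks needs
 * (524288 / 4096) * 4 = 512 stripe_heads, so the default cache of 256
 * (NR_STRIPES) is too small and stripe_cache_size must be raised first.
 */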
static int check_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;

    if (mddev->delta_disks == 0 &&
        mddev->new_layout == mddev->layout &&
        mddev->new_chunk_sectors == mddev->chunk_sectors)
        return 0; /* nothing to do */
    if (mddev->bitmap)
        /* Cannot grow a bitmap yet */
        return -EBUSY;
    if (mddev->degraded > conf->max_degraded)
        return -EINVAL;
    if (mddev->delta_disks < 0) {
        /* We might be able to shrink, but the devices must
         * be made bigger first.
         * For raid6, 4 is the minimum size.
         * Otherwise 2 is the minimum.
         */
        int min = 2;
        if (mddev->level == 6)
            min = 4;
        if (mddev->raid_disks + mddev->delta_disks < min)
            return -EINVAL;
    }

    if (!check_stripe_cache(mddev))
        return -ENOSPC;

    return resize_stripes(conf, conf->raid_disks + mddev->delta_disks);
}
static int raid5_start_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;
    mdk_rdev_t *rdev;
    int spares = 0;
    int added_devices = 0;
    unsigned long flags;

    if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
        return -EBUSY;

    if (!check_stripe_cache(mddev))
        return -ENOSPC;

    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk < 0 &&
            !test_bit(Faulty, &rdev->flags))
            spares++;

    if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded)
        /* Not enough devices even to make a degraded array
         */
        return -EINVAL;

    /* Refuse to reduce size of the array.  Any reductions in
     * array size must be through explicit setting of array_size
     * attribute.
     */
    if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks)
        < mddev->array_sectors) {
        printk(KERN_ERR "md: %s: array size must be reduced "
               "before number of disks\n", mdname(mddev));
        return -EINVAL;
    }

    atomic_set(&conf->reshape_stripes, 0);
    spin_lock_irq(&conf->device_lock);
    conf->previous_raid_disks = conf->raid_disks;
    conf->raid_disks += mddev->delta_disks;
    conf->prev_chunk_sectors = conf->chunk_sectors;
    conf->chunk_sectors = mddev->new_chunk_sectors;
    conf->prev_algo = conf->algorithm;
    conf->algorithm = mddev->new_layout;
    if (mddev->delta_disks < 0)
        conf->reshape_progress = raid5_size(mddev, 0, 0);
    else
        conf->reshape_progress = 0;
    conf->reshape_safe = conf->reshape_progress;
    spin_unlock_irq(&conf->device_lock);
    /* Add some new drives, as many as will fit.
     * We know there are enough to make the newly sized array work.
     */
    list_for_each_entry(rdev, &mddev->disks, same_set)
        if (rdev->raid_disk < 0 &&
            !test_bit(Faulty, &rdev->flags)) {
            if (raid5_add_disk(mddev, rdev) == 0) {
                char nm[20];
                if (rdev->raid_disk >= conf->previous_raid_disks) {
                    set_bit(In_sync, &rdev->flags);
                    added_devices++;
                } else
                    rdev->recovery_offset = 0;
                sprintf(nm, "rd%d", rdev->raid_disk);
                if (sysfs_create_link(&mddev->kobj,
                                      &rdev->kobj, nm))
                    printk(KERN_WARNING
                           "raid5: failed to create "
                           " link %s for %s\n",
                           nm, mdname(mddev));
            }
        }

    /* When a reshape changes the number of devices, ->degraded
     * is measured against the larger of the pre and post number of
     * devices.
     */
    if (mddev->delta_disks > 0) {
        spin_lock_irqsave(&conf->device_lock, flags);
        mddev->degraded += (conf->raid_disks - conf->previous_raid_disks)
            - added_devices;
        spin_unlock_irqrestore(&conf->device_lock, flags);
    }
    mddev->raid_disks = conf->raid_disks;
    mddev->reshape_position = conf->reshape_progress;
    set_bit(MD_CHANGE_DEVS, &mddev->flags);
    clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
    clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
    set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
    set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
    mddev->sync_thread = md_register_thread(md_do_sync, mddev,
                                            "reshape");
    if (!mddev->sync_thread) {
        mddev->recovery = 0;
        spin_lock_irq(&conf->device_lock);
        mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks;
        conf->reshape_progress = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        return -EAGAIN;
    }
    conf->reshape_checkpoint = jiffies;
    md_wakeup_thread(mddev->sync_thread);
    md_new_event(mddev);
    return 0;
}
/* This is called from the reshape thread and should make any
 * changes needed in 'conf'
 */
static void end_reshape(raid5_conf_t *conf)
{
    if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {

        spin_lock_irq(&conf->device_lock);
        conf->previous_raid_disks = conf->raid_disks;
        conf->reshape_progress = MaxSector;
        spin_unlock_irq(&conf->device_lock);
        wake_up(&conf->wait_for_overlap);

        /* read-ahead size must cover two whole stripes, which is
         * 2 * (number of data disks) * chunksize
         */
        {
            int data_disks = conf->raid_disks - conf->max_degraded;
            int stripe = data_disks * ((conf->chunk_sectors << 9)
                                       / PAGE_SIZE);
            if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe)
                conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe;
        }
    }
}
/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(mddev_t *mddev)
{
    raid5_conf_t *conf = mddev->private;

    if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

        if (mddev->delta_disks > 0) {
            md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
            set_capacity(mddev->gendisk, mddev->array_sectors);
            revalidate_disk(mddev->gendisk);
        } else {
            int d;
            mddev->degraded = conf->raid_disks;
            for (d = 0; d < conf->raid_disks; d++)
                if (conf->disks[d].rdev &&
                    test_bit(In_sync,
                             &conf->disks[d].rdev->flags))
                    mddev->degraded--;
            for (d = conf->raid_disks;
                 d < conf->raid_disks - mddev->delta_disks;
                 d++) {
                mdk_rdev_t *rdev = conf->disks[d].rdev;
                if (rdev && raid5_remove_disk(mddev, d) == 0) {
                    char nm[20];
                    sprintf(nm, "rd%d", rdev->raid_disk);
                    sysfs_remove_link(&mddev->kobj, nm);
                    rdev->raid_disk = -1;
                }
            }
        }
        mddev->layout = conf->algorithm;
        mddev->chunk_sectors = conf->chunk_sectors;
        mddev->reshape_position = MaxSector;
        mddev->delta_disks = 0;
    }
}
static void raid5_quiesce(mddev_t *mddev, int state)
{
    raid5_conf_t *conf = mddev->private;

    switch (state) {
    case 2: /* resume for a suspend */
        wake_up(&conf->wait_for_overlap);
        break;

    case 1: /* stop all writes */
        spin_lock_irq(&conf->device_lock);
        /* '2' tells resync/reshape to pause so that all
         * active stripes can drain
         */
        conf->quiesce = 2;
        wait_event_lock_irq(conf->wait_for_stripe,
                            atomic_read(&conf->active_stripes) == 0 &&
                            atomic_read(&conf->active_aligned_reads) == 0,
                            conf->device_lock, /* nothing */);
        conf->quiesce = 1;
        spin_unlock_irq(&conf->device_lock);
        /* allow reshape to continue */
        wake_up(&conf->wait_for_overlap);
        break;

    case 0: /* re-enable writes */
        spin_lock_irq(&conf->device_lock);
        conf->quiesce = 0;
        wake_up(&conf->wait_for_stripe);
        wake_up(&conf->wait_for_overlap);
        spin_unlock_irq(&conf->device_lock);
        break;
    }
}
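/* State machine summary: 0 re-enables writes, 1 drains every active stripe
 * and aligned read before returning with writes blocked, and 2 is the
 * transient "resume for a suspend" state that sync_request() waits on
 * (conf->quiesce != 2) so that raid5_quiesce() can complete.
 */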
static void *raid5_takeover_raid0(mddev_t *mddev)
{
    mddev->new_level = 5;
    mddev->new_layout = ALGORITHM_PARITY_N;
    mddev->new_chunk_sectors = mddev->chunk_sectors;
    mddev->raid_disks += 1;
    mddev->delta_disks = 1;
    /* make sure it will not be marked as dirty */
    mddev->recovery_cp = MaxSector;

    return setup_conf(mddev);
}
static void *raid5_takeover_raid1(mddev_t *mddev)
{
    int chunksect;

    if (mddev->raid_disks != 2 ||
        mddev->degraded > 1)
        return ERR_PTR(-EINVAL);

    /* Should check if there are write-behind devices? */

    chunksect = 64*2; /* 64K by default */

    /* The array must be an exact multiple of chunksize */
    while (chunksect && (mddev->array_sectors & (chunksect-1)))
        chunksect >>= 1;

    if ((chunksect<<9) < STRIPE_SIZE)
        /* array size does not allow a suitable chunk size */
        return ERR_PTR(-EINVAL);

    mddev->new_level = 5;
    mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
    mddev->new_chunk_sectors = chunksect;

    return setup_conf(mddev);
}
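/* For example (array size assumed): an array of 1000000 sectors halves the
 * candidate from 128 sectors (64K) down to 64 sectors (32K), the largest
 * power of two that divides it; anything that would fall below STRIPE_SIZE
 * (one page) is rejected instead.
 */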
static void *raid5_takeover_raid6(mddev_t *mddev)
{
    int new_layout;

    switch (mddev->layout) {
    case ALGORITHM_LEFT_ASYMMETRIC_6:
        new_layout = ALGORITHM_LEFT_ASYMMETRIC;
        break;
    case ALGORITHM_RIGHT_ASYMMETRIC_6:
        new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
        break;
    case ALGORITHM_LEFT_SYMMETRIC_6:
        new_layout = ALGORITHM_LEFT_SYMMETRIC;
        break;
    case ALGORITHM_RIGHT_SYMMETRIC_6:
        new_layout = ALGORITHM_RIGHT_SYMMETRIC;
        break;
    case ALGORITHM_PARITY_0_6:
        new_layout = ALGORITHM_PARITY_0;
        break;
    case ALGORITHM_PARITY_N:
        new_layout = ALGORITHM_PARITY_N;
        break;
    default:
        return ERR_PTR(-EINVAL);
    }
    mddev->new_level = 5;
    mddev->new_layout = new_layout;
    mddev->delta_disks = -1;
    mddev->raid_disks -= 1;
    return setup_conf(mddev);
}
static int raid5_check_reshape(mddev_t *mddev)
{
    /* For a 2-drive array, the layout and chunk size can be changed
     * immediately as no restriping is needed.
     * For larger arrays we record the new value - after validation
     * to be used by a reshape pass.
     */
    raid5_conf_t *conf = mddev->private;
    int new_chunk = mddev->new_chunk_sectors;

    if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
        return -EINVAL;
    if (new_chunk > 0) {
        if (!is_power_of_2(new_chunk))
            return -EINVAL;
        if (new_chunk < (PAGE_SIZE>>9))
            return -EINVAL;
        if (mddev->array_sectors & (new_chunk-1))
            /* not factor of array size */
            return -EINVAL;
    }

    /* They look valid */

    if (mddev->raid_disks == 2) {
        /* can make the change immediately */
        if (mddev->new_layout >= 0) {
            conf->algorithm = mddev->new_layout;
            mddev->layout = mddev->new_layout;
        }
        if (new_chunk > 0) {
            conf->chunk_sectors = new_chunk;
            mddev->chunk_sectors = new_chunk;
        }
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
        md_wakeup_thread(mddev->thread);
    }
    return check_reshape(mddev);
}
static int raid6_check_reshape(mddev_t *mddev)
{
    int new_chunk = mddev->new_chunk_sectors;

    if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
        return -EINVAL;
    if (new_chunk > 0) {
        if (!is_power_of_2(new_chunk))
            return -EINVAL;
        if (new_chunk < (PAGE_SIZE >> 9))
            return -EINVAL;
        if (mddev->array_sectors & (new_chunk-1))
            /* not factor of array size */
            return -EINVAL;
    }

    /* They look valid */
    return check_reshape(mddev);
}
static void *raid5_takeover(mddev_t *mddev)
{
    /* raid5 can take over:
     *  raid0 - if all devices are the same - make it a raid4 layout
     *  raid1 - if there are two drives.  We need to know the chunk size
     *  raid4 - trivial - just use a raid4 layout.
     *  raid6 - Providing it is a *_6 layout
     */
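    /* A takeover is normally requested from user space by changing the
     * level of a running array, e.g. (illustrative, assuming a recent
     * mdadm):
     *   mdadm --grow /dev/md0 --level=5
     * md's level-change path then invokes this ->takeover method with
     * mddev->new_level already set.
     */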
    if (mddev->level == 0) {
        /* for raid0 takeover only one zone is supported */
        struct raid0_private_data *raid0_priv
            = mddev->private;
        if (raid0_priv->nr_strip_zones > 1) {
            printk(KERN_ERR "md: cannot takeover raid 0 with more than one zone.\n");
            return ERR_PTR(-EINVAL);
        }
        return raid5_takeover_raid0(mddev);
    }

    if (mddev->level == 1)
        return raid5_takeover_raid1(mddev);
    if (mddev->level == 4) {
        mddev->new_layout = ALGORITHM_PARITY_N;
        mddev->new_level = 5;
        return setup_conf(mddev);
    }
    if (mddev->level == 6)
        return raid5_takeover_raid6(mddev);

    return ERR_PTR(-EINVAL);
}
static struct mdk_personality raid5_personality;
static void *raid6_takeover(mddev_t *mddev)
{
    /* Currently can only take over a raid5.  We map the
     * personality to an equivalent raid6 personality
     * with the Q block at the end.
     */
    int new_layout;

    if (mddev->pers != &raid5_personality)
        return ERR_PTR(-EINVAL);
    if (mddev->degraded > 1)
        return ERR_PTR(-EINVAL);
    if (mddev->raid_disks > 253)
        return ERR_PTR(-EINVAL);
    if (mddev->raid_disks < 3)
        return ERR_PTR(-EINVAL);

    switch (mddev->layout) {
    case ALGORITHM_LEFT_ASYMMETRIC:
        new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
        break;
    case ALGORITHM_RIGHT_ASYMMETRIC:
        new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
        break;
    case ALGORITHM_LEFT_SYMMETRIC:
        new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
        break;
    case ALGORITHM_RIGHT_SYMMETRIC:
        new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
        break;
    case ALGORITHM_PARITY_0:
        new_layout = ALGORITHM_PARITY_0_6;
        break;
    case ALGORITHM_PARITY_N:
        new_layout = ALGORITHM_PARITY_N;
        break;
    default:
        return ERR_PTR(-EINVAL);
    }
    mddev->new_level = 6;
    mddev->new_layout = new_layout;
    mddev->delta_disks = 1;
    mddev->raid_disks += 1;
    return setup_conf(mddev);
}
static struct mdk_personality raid6_personality =
{
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .check_reshape   = raid6_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
    .takeover        = raid6_takeover,
};
static struct mdk_personality raid5_personality =
{
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .check_reshape   = raid5_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
    .takeover        = raid5_takeover,
};
static struct mdk_personality raid4_personality =
{
    .owner           = THIS_MODULE,
    .make_request    = make_request,
    .error_handler   = error,
    .hot_add_disk    = raid5_add_disk,
    .hot_remove_disk = raid5_remove_disk,
    .spare_active    = raid5_spare_active,
    .sync_request    = sync_request,
    .resize          = raid5_resize,
    .check_reshape   = raid5_check_reshape,
    .start_reshape   = raid5_start_reshape,
    .finish_reshape  = raid5_finish_reshape,
    .quiesce         = raid5_quiesce,
};
static int __init raid5_init(void)
{
    register_md_personality(&raid6_personality);
    register_md_personality(&raid5_personality);
    register_md_personality(&raid4_personality);
    return 0;
}

static void raid5_exit(void)
{
    unregister_md_personality(&raid6_personality);
    unregister_md_personality(&raid5_personality);
    unregister_md_personality(&raid4_personality);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");