// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include "ctree.h"
#include "disk-io.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE		1024

#define BTRFS_STRIPE_HASH_TABLE_BITS	11
/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash {
	struct list_head hash_list;
	spinlock_t lock;
};

/* Used by the raid56 code to lock stripes for read/modify/write */
struct btrfs_stripe_hash_table {
	struct list_head stripe_cache;
	spinlock_t cache_lock;
	int cache_size;
	struct btrfs_stripe_hash table[];
};
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;

	/* allocated with real_stripes-many pointers for finish_*() calls */
	void **finish_pointers;

	/* allocated with stripe_npages-many bits for finish_*() calls */
	unsigned long *finish_pbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void scrub_parity_work(struct btrfs_work *work);
static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		copy_page(d, s);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
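
/*
 * Illustrative example (assumed values): a full stripe whose first
 * logical byte address is 0x2400000 hashes as
 * hash_64(0x2400000 >> 16, BTRFS_STRIPE_HASH_TABLE_BITS) ==
 * hash_64(0x240, 11), picking one of the 1 << 11 == 2048 buckets.
 */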
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}
/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
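
/*
 * Sketch of the batching above, assuming MAX_XOR_BLOCKS is 4 (its
 * value in linux/raid/xor.h at the time of writing): xoring six
 * sources into dest expands to
 *
 *	xor_blocks(4, len, dest, pages + 0);
 *	xor_blocks(2, len, dest, pages + 4);
 *
 * dest accumulates across calls, which is why finish_rmw() first
 * seeds the parity page with copy_page() and then calls
 * run_xor(pointers + 1, nr_data - 1, PAGE_SIZE).
 */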
/*
 * Returns true if the bio list inside this rbio covers an entire stripe (no
 * rmw required).
 */
static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;
	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
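
/*
 * Example (illustrative numbers): a 3-device RAID5 rbio has
 * nr_data == 2, so with a 64KiB stripe_len the bio_list must cover
 * exactly 2 * 64KiB == 128KiB for the rbio to be full; anything
 * smaller takes the read/modify/write path.
 */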
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * A scrub needs to read the full stripe from the drive, then
	 * check and repair the parity and write out the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	if (last->operation == BTRFS_RBIO_READ_REBUILD) {
		int fa = last->faila;
		int fb = last->failb;
		int cur_fa = cur->faila;
		int cur_fb = cur->failb;

		if (last->faila >= last->failb) {
			fa = last->failb;
			fb = last->faila;
		}

		if (cur->faila >= cur->failb) {
			cur_fa = cur->failb;
			cur_fb = cur->faila;
		}

		if (fa != cur_fa || fb != cur_fb)
			return 0;
	}
	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}
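
/*
 * stripe_pages is a single flat array: all pages of stripe 0, then
 * all pages of stripe 1, and so on. For example, with
 * stripe_npages == 16, page 3 of stripe 2 lives at index
 * 2 * 16 + 3 == 35.
 */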
/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}
/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
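
/*
 * Example: a 4-device RAID6 rbio has real_stripes == 4 and
 * nr_data == 2, so P is stripe index 2 and Q is stripe index 3. For
 * RAID5, nr_data + 1 == real_stripes and the helper above returns
 * NULL because there is no Q stripe.
 */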
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started with the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				start_async_work(next, read_rebuild_work);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				start_async_work(next, read_rebuild_work);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				start_async_work(next, rmw_work);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				start_async_work(next, scrub_parity_work);
			}

			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
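
/*
 * chunk_page above is the same flat index that
 * rbio_stripe_page_index() produces, since stripe_npages ==
 * DIV_ROUND_UP(stripe_len, PAGE_SIZE): e.g. with a 16-page stripe,
 * index == 2 and pagenr == 3 look at bio_pages[2 * 16 + 3].
 */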
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
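
/*
 * Example: a 64KiB stripe_len with 4KiB pages is 16 pages per
 * stripe, so a 3-device array needs 16 * 3 == 48 pages for the full
 * stripe including parity.
 */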
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) +
		       sizeof(*rbio->stripe_pages) * num_pages +
		       sizeof(*rbio->bio_pages) * num_pages +
		       sizeof(*rbio->finish_pointers) * real_stripes +
		       sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
		       sizeof(*rbio->finish_pbitmap) *
				BITS_TO_LONGS(stripe_npages),
		       GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages, bio_pages, etc arrays point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
#define CONSUME_ALLOC(ptr, count)	do {				\
		ptr = p;						\
		p = (unsigned char *)p + sizeof(*(ptr)) * (count);	\
	} while (0)
	CONSUME_ALLOC(rbio->stripe_pages, num_pages);
	CONSUME_ALLOC(rbio->bio_pages, num_pages);
	CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
	CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
	CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
#undef  CONSUME_ALLOC

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
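
/*
 * Rough sketch of the single allocation produced above:
 *
 *	[struct btrfs_raid_bio]
 *	[stripe_pages:     num_pages pointers]
 *	[bio_pages:        num_pages pointers]
 *	[finish_pointers:  real_stripes pointers]
 *	[dbitmap:          BITS_TO_LONGS(stripe_npages) longs]
 *	[finish_pbitmap:   BITS_TO_LONGS(stripe_npages) longs]
 *
 * so one kzalloc()/kfree() pair covers the rbio and all five
 * trailing arrays.
 */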
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void **pointers = rbio->finish_pointers;
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, iter_all)
		SetPageUptodate(bvec->bv_page);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}
/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		start_async_work(rbio, rmw_work);
	return 0;
}
/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
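
/*
 * Sorting by the starting sector of each rbio's first bio places
 * rbios of the same full stripe next to each other, so run_plug()
 * below only has to try rbio_can_merge() on neighbouring entries
 * instead of comparing every pair.
 */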
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			int ret;

			/* we have a full stripe, send it down */
			ret = full_stripe_write(cur);
			BUG_ON(ret);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			copy_page(pointers[faila], pointers[rbio->nr_data]);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	/*
	 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
	 * valid rbio which is consistent with ondisk content, thus such a
	 * valid rbio can be cached to avoid further disk reads.
	 */
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		/*
		 * - In case of two failures, where rbio->failb != -1:
		 *
		 *   Do not cache this rbio since the above read reconstruction
		 *   (raid6_datap_recov() or raid6_2data_recov()) may have
		 *   changed some content of stripes which are not identical to
		 *   on-disk content any more, otherwise, a later write/recover
		 *   may steal stripe_pages from this rbio and end up with
		 *   corruptions or rebuild failures.
		 *
		 * - In case of single failure, where rbio->failb == -1:
		 *
		 *   Cache this rbio iff the above read reconstruction is
		 *   executed without problems.
		 */
		if (err == BLK_STS_OK && rbio->failb < 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			return 0;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}

	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe.  'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
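
/*
 * Worked example of the mirror_num mapping above, for a 4-device
 * RAID6 (real_stripes == 4, nr_data == 2): mirror_num == 3 gives
 * failb == 4 - 2 == 2, the P stripe, so we rebuild from Q;
 * mirror_num == 4 gives failb == 1, failing a data stripe so it is
 * rebuilt from P+Q (shifted down by one if it collides with faila).
 */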
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure all the pages that are added into the
 * scrub/replace raid bio are correct and are not changed during the
 * scrub/replace. That is, those pages just hold metadata or file data
 * with checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
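
/*
 * Example: if raid_map[0] is the full stripe's starting logical
 * address, a page at logical raid_map[0] + 0x5000 lands in
 * bio_pages[5] (0x5000 >> PAGE_SHIFT with 4KiB pages). The index
 * covers data stripes only, matching the flat layout that
 * page_in_rbio() reads.
 */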
/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
2350 static noinline
void finish_parity_scrub(struct btrfs_raid_bio
*rbio
,
2353 struct btrfs_bio
*bbio
= rbio
->bbio
;
2354 void **pointers
= rbio
->finish_pointers
;
2355 unsigned long *pbitmap
= rbio
->finish_pbitmap
;
2356 int nr_data
= rbio
->nr_data
;
2361 struct page
*p_page
= NULL
;
2362 struct page
*q_page
= NULL
;
2363 struct bio_list bio_list
;
2368 bio_list_init(&bio_list
);
2370 if (rbio
->real_stripes
- rbio
->nr_data
== 1) {
2371 p_stripe
= rbio
->real_stripes
- 1;
2372 } else if (rbio
->real_stripes
- rbio
->nr_data
== 2) {
2373 p_stripe
= rbio
->real_stripes
- 2;
2374 q_stripe
= rbio
->real_stripes
- 1;
2379 if (bbio
->num_tgtdevs
&& bbio
->tgtdev_map
[rbio
->scrubp
]) {
2381 bitmap_copy(pbitmap
, rbio
->dbitmap
, rbio
->stripe_npages
);
2385 * Because the higher layers(scrubber) are unlikely to
2386 * use this area of the disk again soon, so don't cache
2389 clear_bit(RBIO_CACHE_READY_BIT
, &rbio
->flags
);
	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * RAID6: add the qstripe and call the library
			 * function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* RAID5: P is the XOR of the data pages */
			copy_page(pointers[nr_data], pointers[0]);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check the scrubbed parity and repair it if it is wrong */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			copy_page(parity, pointers[rbio->scrubp]);
		else
			/* Parity is right, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		/* unmap the parity pages we mapped above */
		kunmap(p_page);
		if (q_stripe != -1)
			kunmap(q_page);
	}
	__free_page(p_page);
	if (q_page)
		__free_page(q_page);
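/*
 * The recomputation above is standard RAID5/6 parity math. For RAID5, P is
 * the byte-wise XOR of the data pages, e.g. with three data stripes:
 *
 *	P[i] = D0[i] ^ D1[i] ^ D2[i]
 *
 * which is what copy_page() + run_xor() implement. For RAID6,
 * raid6_call.gen_syndrome() additionally computes the Q page as the
 * Reed-Solomon syndrome over GF(2^8), allowing up to two failures to be
 * corrected.
 */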
writeback:
	/*
	 * time to start writing. Make bios for everything from the higher
	 * layers (the bio_list in our rbio) and our p/q. Ignore everything
	 * else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}
	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio->bi_opf = REQ_OP_WRITE;

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}
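/*
 * Design note: only pages of the scrubbed parity stripe (rbio->scrubp) are
 * written back, and only for rows still set in dbitmap; rows whose parity
 * already matched were cleared from the bitmap during the check phase. In
 * the replace case the same pages are queued a second time for the replace
 * target. Write completion is handled by raid_write_end_io(), the same
 * end_io used by the regular RMW write path.
 */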
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk. This checks for errors, and if we're
 * not able to read a page it'll trigger parity reconstruction. The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * We cannot use the scrubbed parity to repair data, so our
		 * repair capability is reduced by one. (In the RAID5 case we
		 * cannot repair anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is wrong, just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Here we have one corrupted data stripe and one corrupted
		 * parity on RAID6. If the corrupted parity is the one being
		 * scrubbed, we can luckily use the other parity to repair
		 * the data; otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
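/*
 * Summary of the decision tree above (dfail = number of failed data
 * stripes, failp = failed parity stripe, if any):
 *
 *	no failures at all:          run the full check, finish_parity_scrub(rbio, 1)
 *	dfail == 0, parity failed:   just rewrite the parity, finish_parity_scrub(rbio, 0)
 *	dfail > max_errors - 1:      unrecoverable, since the scrubbed parity
 *				     itself cannot be used for repair
 *	dfail >= 1, failp == scrubp: rebuild the data from the remaining good
 *				     parity via __raid_recover_end_io()
 *	anything else:               fail the rbio
 */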
/*
 * end io for the read phase of the scrub cycle. All the bios here are
 * physical stripe bios we've read from the disk so we can recalculate the
 * parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct from parity
	 * first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it.
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with us; it means
		 * there is nothing left to read. But if there are missing
		 * devices it may not be safe to do the full stripe write
		 * yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio->bi_opf = REQ_OP_READ;

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, scrub_parity_work);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}
void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		start_async_work(rbio, read_rebuild_work);
}
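/*
 * A rough sketch of the missing-device rebuild flow as driven from the
 * scrub/replace code (illustrative only; the real caller lives in scrub.c):
 *
 *	rbio = raid56_alloc_missing_rbio(fs_info, bio, bbio, length);
 *	if (!rbio)
 *		return -ENOMEM;
 *	// attach the pages that should be rebuilt for the missing device:
 *	raid56_add_scrub_pages(rbio, page, logical);
 *	raid56_submit_missing_rbio(rbio);
 */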