/*
 * Copyright (C) 2012 Fusion-io All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE 1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};
struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	int real_stripes;

	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;

	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);
/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}
/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}
/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}
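
/*
 * Illustrative note (added to this listing, not in the original file):
 * full stripe start addresses are large, aligned byte offsets, so for
 * two neighbouring stripes at logical 0x10000000 and 0x10010000 the
 * low 16 bits are both zero.  Hashing (num >> 16) feeds hash_64 the
 * bits that actually differ, so the two stripes land in different
 * buckets instead of collapsing onto one or two hash values.
 */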
/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s)) {
			continue;
		}

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->rbio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}
/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}
/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}
/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}
/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}
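
/*
 * Illustrative note (added to this listing, not in the original file):
 * xor_blocks() folds the source buffers into dest, so a caller that
 * wants P = D0 ^ D1 ^ ... ^ Dn-1 first seeds dest with D0 and then
 * hands the remaining sources to run_xor, e.g.:
 *
 *	memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
 *	run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 *
 * which is exactly how finish_rmw computes raid5 parity below.
 */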
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;

	/*
	 * parity scrub needs to read the full stripe from the drive,
	 * check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}
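
/*
 * Illustrative layout (added to this listing, not in the original
 * file): stripe_pages is a flat array with stripe_npages entries per
 * stripe, data stripes first, then P, then Q (raid6 only):
 *
 *	[ D0 pages | D1 pages | ... | P pages | Q pages ]
 *
 * so the pstripe lives at stripe index nr_data and the qstripe, when
 * present, at nr_data + 1, which is what the two helpers above index.
 */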
/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned.
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}

			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}
/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);

	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}
/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}
/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}
/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or not.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}
/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}
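
/*
 * Example (added to this listing, not in the original file): with the
 * usual 64KiB stripe_len and 4KiB pages, a 3-device raid5 full stripe
 * needs DIV_ROUND_UP(65536, 4096) * 3 = 48 pages: 16 per data stripe
 * plus 16 for parity.
 */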
/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}
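
/*
 * Illustrative note (added to this listing, not in the original file):
 * everything hangs off the single kzalloc above, laid out as
 *
 *	[struct btrfs_raid_bio][stripe_pages ptrs][bio_pages ptrs][dbitmap]
 *
 * With 64KiB stripes, 4KiB pages and 3 real stripes that is 48 page
 * pointers in each array plus one long of dbitmap (16 bits used), all
 * released by the single kfree in __free_raid_bio.
 */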
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}
/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}
/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}
/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	struct bio_vec *bvec;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		bio_for_each_segment_all(bvec, bio, i)
			rbio->bio_pages[page_index + i] = bvec->bv_page;
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}
/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}
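
		/*
		 * Illustrative note (added to this listing, not in the
		 * original file): for raid6, gen_syndrome fills both
		 * P (the plain xor of the data pages) and Q (the
		 * Reed-Solomon syndrome) in one pass; for raid5 the
		 * two lines above compute only P by seeding it with
		 * the first data page and xoring in the rest.
		 */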
		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;

			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}
/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}
/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}
/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}
/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}
/*
 * the stripe must be locked by the caller.  It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}
/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe.  So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}
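
/*
 * Illustrative note (added to this listing, not in the original file):
 * "full" here means the bio_list already covers nr_data * stripe_len
 * bytes (see __rbio_is_full), so parity can be computed from the bios
 * alone and only the P/Q pages need allocating; anything smaller must
 * first read the rest of the stripe, hence the rmw path.
 */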
/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list.  When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};
/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}
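
/*
 * Illustrative note (added to this listing, not in the original file):
 * after list_sort orders the plug list by starting sector, rbios that
 * touch the same full stripe end up adjacent, so the single
 * rbio_can_merge/merge_rbio walk in run_plug below can coalesce them
 * without rescanning the whole list.
 */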
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}

			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last) {
		__raid56_parity_write(last);
	}
	kfree(plug);
}
/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}
/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
/*
 * all parity reconstruction happens here.  We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe is failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe are failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
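
		/*
		 * Illustrative note (added to this listing, not in the
		 * original file): the pstripe path works because raid5
		 * parity satisfies P = D0 ^ D1 ^ ... ^ Dn-1, so any one
		 * missing Di = P ^ (xor of the surviving data).  The two
		 * raid6 library calls above handle the remaining cases:
		 * raid6_datap_recov for one data stripe + P lost, and
		 * raid6_2data_recov for two lost data stripes.
		 */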
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}
/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}
/*
 * the main entry point for reads from the higher layers.  This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * reconstruct from the q stripe if they are
	 * asking for mirror 3
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits.  We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}
/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Note: We need to make sure that all the pages added into the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace.  That is, those pages just hold metadata or file data
 * with checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* Now we just support the sectorsize equals to page size */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}
/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
	       rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
/*
 * We just scrub the parity that we have correct data on the same horizontal,
 * so we needn't allocate all pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * Because the higher layers (the scrubber) are unlikely to use
	 * this area of the disk again soon, don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;

		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}
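
	/*
	 * Illustrative note (added to this listing, not in the original
	 * file): after the loop above, dbitmap only has bits left for
	 * pages whose recomputed parity differed from what is on disk,
	 * so the writeback below touches just the sectors that actually
	 * need repair.
	 */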
	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}
/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we can not use the parity being scrubbed to
		 * repair the data, our repair capability is reduced by
		 * one.  (In the case of RAID5, we can not repair
		 * anything.)
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good and only the parity is wrong,
		 * just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe
		 * and one corrupted parity on RAID6.  If the corrupted
		 * parity is the one being scrubbed, we can luckily use
		 * the other one to repair the data; otherwise the data
		 * stripe can not be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}
static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}
static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}
/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}
static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}