git blame: fs/btrfs/raid56.c (mirror_ubuntu-artful-kernel.git)
at commit: Merge tag 'for-linus-20170825' of git://git.infradead.org/linux-mtd
53b381b3
DW
1/*
2 * Copyright (C) 2012 Fusion-io All rights reserved.
3 * Copyright (C) 2012 Intel Corp. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public
7 * License v2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public
15 * License along with this program; if not, write to the
16 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
17 * Boston, MA 021110-1307, USA.
18 */
19#include <linux/sched.h>
20#include <linux/wait.h>
21#include <linux/bio.h>
22#include <linux/slab.h>
23#include <linux/buffer_head.h>
24#include <linux/blkdev.h>
25#include <linux/random.h>
26#include <linux/iocontext.h>
27#include <linux/capability.h>
28#include <linux/ratelimit.h>
29#include <linux/kthread.h>
30#include <linux/raid/pq.h>
31#include <linux/hash.h>
32#include <linux/list_sort.h>
33#include <linux/raid/xor.h>
818e010b 34#include <linux/mm.h>
53b381b3 35#include <asm/div64.h>
53b381b3
DW
36#include "ctree.h"
37#include "extent_map.h"
38#include "disk-io.h"
39#include "transaction.h"
40#include "print-tree.h"
41#include "volumes.h"
42#include "raid56.h"
43#include "async-thread.h"
44#include "check-integrity.h"
45#include "rcu-string.h"
46
47/* set when additional merges to this rbio are not allowed */
48#define RBIO_RMW_LOCKED_BIT 1
49
4ae10b3a
CM
50/*
51 * set when this rbio is sitting in the hash, but it is just a cache
52 * of past RMW
53 */
54#define RBIO_CACHE_BIT 2
55
56/*
57 * set when it is safe to trust the stripe_pages for caching
58 */
59#define RBIO_CACHE_READY_BIT 3
60
4ae10b3a
CM
61#define RBIO_CACHE_SIZE 1024
62
1b94b556 63enum btrfs_rbio_ops {
b4ee1782
OS
64 BTRFS_RBIO_WRITE,
65 BTRFS_RBIO_READ_REBUILD,
66 BTRFS_RBIO_PARITY_SCRUB,
67 BTRFS_RBIO_REBUILD_MISSING,
1b94b556
MX
68};
69
53b381b3
DW
70struct btrfs_raid_bio {
71 struct btrfs_fs_info *fs_info;
72 struct btrfs_bio *bbio;
73
53b381b3
DW
74 /* while we're doing rmw on a stripe
75 * we put it into a hash table so we can
76 * lock the stripe and merge more rbios
77 * into it.
78 */
79 struct list_head hash_list;
80
4ae10b3a
CM
81 /*
82 * LRU list for the stripe cache
83 */
84 struct list_head stripe_cache;
85
53b381b3
DW
86 /*
87 * for scheduling work in the helper threads
88 */
89 struct btrfs_work work;
90
91 /*
92 * bio list and bio_list_lock are used
93 * to add more bios into the stripe
94 * in hopes of avoiding the full rmw
95 */
96 struct bio_list bio_list;
97 spinlock_t bio_list_lock;
98
6ac0f488
CM
99 /* also protected by the bio_list_lock, the
100 * plug list is used by the plugging code
101 * to collect partial bios while plugged. The
102 * stripe locking code also uses it to hand off
53b381b3
DW
103 * the stripe lock to the next pending IO
104 */
105 struct list_head plug_list;
106
107 /*
108 * flags that tell us if it is safe to
109 * merge with this bio
110 */
111 unsigned long flags;
112
113 /* size of each individual stripe on disk */
114 int stripe_len;
115
116 /* number of data stripes (no p/q) */
117 int nr_data;
118
2c8cdd6e
MX
119 int real_stripes;
120
5a6ac9ea 121 int stripe_npages;
53b381b3
DW
122 /*
123 * set if we're doing a parity rebuild
124 * for a read from higher up, which is handled
125 * differently from a parity rebuild as part of
126 * rmw
127 */
1b94b556 128 enum btrfs_rbio_ops operation;
53b381b3
DW
129
130 /* first bad stripe */
131 int faila;
132
133 /* second bad stripe (for raid6 use) */
134 int failb;
135
5a6ac9ea 136 int scrubp;
53b381b3
DW
137 /*
138 * number of pages needed to represent the full
139 * stripe
140 */
141 int nr_pages;
142
143 /*
144 * size of all the bios in the bio_list. This
145 * helps us decide if the rbio maps to a full
146 * stripe or not
147 */
148 int bio_list_bytes;
149
4245215d
MX
150 int generic_bio_cnt;
151
dec95574 152 refcount_t refs;
53b381b3 153
b89e1b01
MX
154 atomic_t stripes_pending;
155
156 atomic_t error;
53b381b3
DW
157 /*
158 * these are two arrays of pointers. We allocate the
 159 * rbio big enough to hold them both and set up their
160 * locations when the rbio is allocated
161 */
162
163 /* pointers to pages that we allocated for
164 * reading/writing stripes directly from the disk (including P/Q)
165 */
166 struct page **stripe_pages;
167
168 /*
169 * pointers to the pages in the bio_list. Stored
170 * here for faster lookup
171 */
172 struct page **bio_pages;
5a6ac9ea
MX
173
174 /*
175 * bitmap to record which horizontal stripe has data
176 */
177 unsigned long *dbitmap;
53b381b3
DW
178};
179
180static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
181static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
182static void rmw_work(struct btrfs_work *work);
183static void read_rebuild_work(struct btrfs_work *work);
184static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
185static void async_read_rebuild(struct btrfs_raid_bio *rbio);
186static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
187static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
188static void __free_raid_bio(struct btrfs_raid_bio *rbio);
189static void index_rbio_pages(struct btrfs_raid_bio *rbio);
190static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
191
5a6ac9ea
MX
192static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
193 int need_check);
194static void async_scrub_parity(struct btrfs_raid_bio *rbio);
195
53b381b3
DW
196/*
197 * the stripe hash table is used for locking, and to collect
198 * bios in hopes of making a full stripe
199 */
200int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
201{
202 struct btrfs_stripe_hash_table *table;
203 struct btrfs_stripe_hash_table *x;
204 struct btrfs_stripe_hash *cur;
205 struct btrfs_stripe_hash *h;
206 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
207 int i;
83c8266a 208 int table_size;
53b381b3
DW
209
210 if (info->stripe_hash_table)
211 return 0;
212
83c8266a
DS
213 /*
214 * The table is large, starting with order 4 and can go as high as
215 * order 7 in case lock debugging is turned on.
216 *
 217 * Try harder to allocate and fall back to vmalloc to lower the chance
218 * of a failing mount.
219 */
220 table_size = sizeof(*table) + sizeof(*h) * num_entries;
818e010b
DS
221 table = kvzalloc(table_size, GFP_KERNEL);
222 if (!table)
223 return -ENOMEM;
53b381b3 224
4ae10b3a
CM
225 spin_lock_init(&table->cache_lock);
226 INIT_LIST_HEAD(&table->stripe_cache);
227
53b381b3
DW
228 h = table->table;
229
230 for (i = 0; i < num_entries; i++) {
231 cur = h + i;
232 INIT_LIST_HEAD(&cur->hash_list);
233 spin_lock_init(&cur->lock);
234 init_waitqueue_head(&cur->wait);
235 }
236
237 x = cmpxchg(&info->stripe_hash_table, NULL, table);
f749303b
WS
238 if (x)
239 kvfree(x);
53b381b3
DW
240 return 0;
241}
242
4ae10b3a
CM
243/*
244 * caching an rbio means to copy anything from the
245 * bio_pages array into the stripe_pages array. We
246 * use the page uptodate bit in the stripe cache array
247 * to indicate if it has valid data
248 *
249 * once the caching is done, we set the cache ready
250 * bit.
251 */
252static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
253{
254 int i;
255 char *s;
256 char *d;
257 int ret;
258
259 ret = alloc_rbio_pages(rbio);
260 if (ret)
261 return;
262
263 for (i = 0; i < rbio->nr_pages; i++) {
264 if (!rbio->bio_pages[i])
265 continue;
266
267 s = kmap(rbio->bio_pages[i]);
268 d = kmap(rbio->stripe_pages[i]);
269
09cbfeaf 270 memcpy(d, s, PAGE_SIZE);
4ae10b3a
CM
271
272 kunmap(rbio->bio_pages[i]);
273 kunmap(rbio->stripe_pages[i]);
274 SetPageUptodate(rbio->stripe_pages[i]);
275 }
276 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
277}
278
53b381b3
DW
279/*
280 * we hash on the first logical address of the stripe
281 */
282static int rbio_bucket(struct btrfs_raid_bio *rbio)
283{
8e5cfb55 284 u64 num = rbio->bbio->raid_map[0];
53b381b3
DW
285
286 /*
287 * we shift down quite a bit. We're using byte
288 * addressing, and most of the lower bits are zeros.
289 * This tends to upset hash_64, and it consistently
290 * returns just one or two different values.
291 *
292 * shifting off the lower bits fixes things.
293 */
294 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
295}
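
/*
 * Worked example (annotation added for this copy, not in the original
 * source): for a full stripe whose raid_map[0] is 0x400000000, the low
 * 16 bits are discarded first (num >> 16 == 0x40000), and hash_64()
 * then folds that value down to BTRFS_STRIPE_HASH_TABLE_BITS bits,
 * i.e. an index into the 1 << BTRFS_STRIPE_HASH_TABLE_BITS buckets.
 */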
296
4ae10b3a
CM
297/*
298 * stealing an rbio means taking all the uptodate pages from the stripe
299 * array in the source rbio and putting them into the destination rbio
300 */
301static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
302{
303 int i;
304 struct page *s;
305 struct page *d;
306
307 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
308 return;
309
310 for (i = 0; i < dest->nr_pages; i++) {
311 s = src->stripe_pages[i];
312 if (!s || !PageUptodate(s)) {
313 continue;
314 }
315
316 d = dest->stripe_pages[i];
317 if (d)
318 __free_page(d);
319
320 dest->stripe_pages[i] = s;
321 src->stripe_pages[i] = NULL;
322 }
323}
324
53b381b3
DW
325/*
326 * merging means we take the bio_list from the victim and
327 * splice it into the destination. The victim should
328 * be discarded afterwards.
329 *
330 * must be called with dest->rbio_list_lock held
331 */
332static void merge_rbio(struct btrfs_raid_bio *dest,
333 struct btrfs_raid_bio *victim)
334{
335 bio_list_merge(&dest->bio_list, &victim->bio_list);
336 dest->bio_list_bytes += victim->bio_list_bytes;
4245215d 337 dest->generic_bio_cnt += victim->generic_bio_cnt;
53b381b3
DW
338 bio_list_init(&victim->bio_list);
339}
340
341/*
4ae10b3a
CM
342 * used to prune items that are in the cache. The caller
343 * must hold the hash table lock.
344 */
345static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
346{
347 int bucket = rbio_bucket(rbio);
348 struct btrfs_stripe_hash_table *table;
349 struct btrfs_stripe_hash *h;
350 int freeit = 0;
351
352 /*
353 * check the bit again under the hash table lock.
354 */
355 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
356 return;
357
358 table = rbio->fs_info->stripe_hash_table;
359 h = table->table + bucket;
360
361 /* hold the lock for the bucket because we may be
362 * removing it from the hash table
363 */
364 spin_lock(&h->lock);
365
366 /*
367 * hold the lock for the bio list because we need
368 * to make sure the bio list is empty
369 */
370 spin_lock(&rbio->bio_list_lock);
371
372 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
373 list_del_init(&rbio->stripe_cache);
374 table->cache_size -= 1;
375 freeit = 1;
376
377 /* if the bio list isn't empty, this rbio is
378 * still involved in an IO. We take it out
379 * of the cache list, and drop the ref that
380 * was held for the list.
381 *
382 * If the bio_list was empty, we also remove
383 * the rbio from the hash_table, and drop
384 * the corresponding ref
385 */
386 if (bio_list_empty(&rbio->bio_list)) {
387 if (!list_empty(&rbio->hash_list)) {
388 list_del_init(&rbio->hash_list);
dec95574 389 refcount_dec(&rbio->refs);
4ae10b3a
CM
390 BUG_ON(!list_empty(&rbio->plug_list));
391 }
392 }
393 }
394
395 spin_unlock(&rbio->bio_list_lock);
396 spin_unlock(&h->lock);
397
398 if (freeit)
399 __free_raid_bio(rbio);
400}
401
402/*
403 * prune a given rbio from the cache
404 */
405static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
406{
407 struct btrfs_stripe_hash_table *table;
408 unsigned long flags;
409
410 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
411 return;
412
413 table = rbio->fs_info->stripe_hash_table;
414
415 spin_lock_irqsave(&table->cache_lock, flags);
416 __remove_rbio_from_cache(rbio);
417 spin_unlock_irqrestore(&table->cache_lock, flags);
418}
419
420/*
421 * remove everything in the cache
422 */
48a3b636 423static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
4ae10b3a
CM
424{
425 struct btrfs_stripe_hash_table *table;
426 unsigned long flags;
427 struct btrfs_raid_bio *rbio;
428
429 table = info->stripe_hash_table;
430
431 spin_lock_irqsave(&table->cache_lock, flags);
432 while (!list_empty(&table->stripe_cache)) {
433 rbio = list_entry(table->stripe_cache.next,
434 struct btrfs_raid_bio,
435 stripe_cache);
436 __remove_rbio_from_cache(rbio);
437 }
438 spin_unlock_irqrestore(&table->cache_lock, flags);
439}
440
441/*
442 * remove all cached entries and free the hash table
443 * used by unmount
53b381b3
DW
444 */
445void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
446{
447 if (!info->stripe_hash_table)
448 return;
4ae10b3a 449 btrfs_clear_rbio_cache(info);
f749303b 450 kvfree(info->stripe_hash_table);
53b381b3
DW
451 info->stripe_hash_table = NULL;
452}
453
4ae10b3a
CM
454/*
455 * insert an rbio into the stripe cache. It
456 * must have already been prepared by calling
457 * cache_rbio_pages
458 *
459 * If this rbio was already cached, it gets
460 * moved to the front of the lru.
461 *
462 * If the size of the rbio cache is too big, we
463 * prune an item.
464 */
465static void cache_rbio(struct btrfs_raid_bio *rbio)
466{
467 struct btrfs_stripe_hash_table *table;
468 unsigned long flags;
469
470 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
471 return;
472
473 table = rbio->fs_info->stripe_hash_table;
474
475 spin_lock_irqsave(&table->cache_lock, flags);
476 spin_lock(&rbio->bio_list_lock);
477
478 /* bump our ref if we were not in the list before */
479 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
dec95574 480 refcount_inc(&rbio->refs);
4ae10b3a
CM
481
482 if (!list_empty(&rbio->stripe_cache)){
483 list_move(&rbio->stripe_cache, &table->stripe_cache);
484 } else {
485 list_add(&rbio->stripe_cache, &table->stripe_cache);
486 table->cache_size += 1;
487 }
488
489 spin_unlock(&rbio->bio_list_lock);
490
491 if (table->cache_size > RBIO_CACHE_SIZE) {
492 struct btrfs_raid_bio *found;
493
494 found = list_entry(table->stripe_cache.prev,
495 struct btrfs_raid_bio,
496 stripe_cache);
497
498 if (found != rbio)
499 __remove_rbio_from_cache(found);
500 }
501
502 spin_unlock_irqrestore(&table->cache_lock, flags);
4ae10b3a
CM
503}
504
53b381b3
DW
505/*
506 * helper function to run the xor_blocks api. It is only
507 * able to do MAX_XOR_BLOCKS at a time, so we need to
508 * loop through.
509 */
510static void run_xor(void **pages, int src_cnt, ssize_t len)
511{
512 int src_off = 0;
513 int xor_src_cnt = 0;
514 void *dest = pages[src_cnt];
515
516 while(src_cnt > 0) {
517 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
518 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
519
520 src_cnt -= xor_src_cnt;
521 src_off += xor_src_cnt;
522 }
523}
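
/*
 * Usage sketch (annotation added for this copy, based on finish_rmw()
 * below): callers lay out the array so that pages[0..src_cnt-1] are the
 * sources and pages[src_cnt] is the destination, which xor_blocks()
 * XORs into in place. For raid5 parity that looks like:
 *
 *	memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
 *	run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
 *
 * which seeds the parity page with the first data page and xors in the
 * remaining data pages.
 */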
524
525/*
526 * returns true if the bio list inside this rbio
527 * covers an entire stripe (no rmw required).
528 * Must be called with the bio list lock held, or
529 * at a time when you know it is impossible to add
530 * new bios into the list
531 */
532static int __rbio_is_full(struct btrfs_raid_bio *rbio)
533{
534 unsigned long size = rbio->bio_list_bytes;
535 int ret = 1;
536
537 if (size != rbio->nr_data * rbio->stripe_len)
538 ret = 0;
539
540 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
541 return ret;
542}
543
544static int rbio_is_full(struct btrfs_raid_bio *rbio)
545{
546 unsigned long flags;
547 int ret;
548
549 spin_lock_irqsave(&rbio->bio_list_lock, flags);
550 ret = __rbio_is_full(rbio);
551 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
552 return ret;
553}
554
555/*
556 * returns 1 if it is safe to merge two rbios together.
557 * The merging is safe if the two rbios correspond to
558 * the same stripe and if they are both going in the same
559 * direction (read vs write), and if neither one is
560 * locked for final IO
561 *
562 * The caller is responsible for locking such that
563 * rmw_locked is safe to test
564 */
565static int rbio_can_merge(struct btrfs_raid_bio *last,
566 struct btrfs_raid_bio *cur)
567{
568 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
569 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
570 return 0;
571
4ae10b3a
CM
572 /*
573 * we can't merge with cached rbios, since the
574 * idea is that when we merge the destination
575 * rbio is going to run our IO for us. We can
01327610 576 * steal from cached rbios though, other functions
4ae10b3a
CM
577 * handle that.
578 */
579 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
580 test_bit(RBIO_CACHE_BIT, &cur->flags))
581 return 0;
582
8e5cfb55
ZL
583 if (last->bbio->raid_map[0] !=
584 cur->bbio->raid_map[0])
53b381b3
DW
585 return 0;
586
5a6ac9ea
MX
587 /* we can't merge with different operations */
588 if (last->operation != cur->operation)
589 return 0;
590 /*
 591 * We need to read the full stripe from the drive, then
 592 * check and repair the parity and write the new results.
593 *
594 * We're not allowed to add any new bios to the
595 * bio list here, anyone else that wants to
596 * change this stripe needs to do their own rmw.
597 */
598 if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
599 cur->operation == BTRFS_RBIO_PARITY_SCRUB)
53b381b3 600 return 0;
53b381b3 601
b4ee1782
OS
602 if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
603 cur->operation == BTRFS_RBIO_REBUILD_MISSING)
604 return 0;
605
53b381b3
DW
606 return 1;
607}
608
b7178a5f
ZL
609static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
610 int index)
611{
612 return stripe * rbio->stripe_npages + index;
613}
614
615/*
616 * these are just the pages from the rbio array, not from anything
617 * the FS sent down to us
618 */
619static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
620 int index)
621{
622 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
623}
624
53b381b3
DW
625/*
626 * helper to index into the pstripe
627 */
628static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
629{
b7178a5f 630 return rbio_stripe_page(rbio, rbio->nr_data, index);
53b381b3
DW
631}
632
633/*
634 * helper to index into the qstripe, returns null
635 * if there is no qstripe
636 */
637static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
638{
2c8cdd6e 639 if (rbio->nr_data + 1 == rbio->real_stripes)
53b381b3 640 return NULL;
b7178a5f 641 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
53b381b3
DW
642}
643
644/*
645 * The first stripe in the table for a logical address
646 * has the lock. rbios are added in one of three ways:
647 *
648 * 1) Nobody has the stripe locked yet. The rbio is given
649 * the lock and 0 is returned. The caller must start the IO
650 * themselves.
651 *
652 * 2) Someone has the stripe locked, but we're able to merge
653 * with the lock owner. The rbio is freed and the IO will
654 * start automatically along with the existing rbio. 1 is returned.
655 *
656 * 3) Someone has the stripe locked, but we're not able to merge.
657 * The rbio is added to the lock owner's plug list, or merged into
658 * an rbio already on the plug list. When the lock owner unlocks,
659 * the next rbio on the list is run and the IO is started automatically.
660 * 1 is returned
661 *
662 * If we return 0, the caller still owns the rbio and must continue with
663 * IO submission. If we return 1, the caller must assume the rbio has
664 * already been freed.
665 */
666static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
667{
668 int bucket = rbio_bucket(rbio);
669 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
670 struct btrfs_raid_bio *cur;
671 struct btrfs_raid_bio *pending;
672 unsigned long flags;
673 DEFINE_WAIT(wait);
674 struct btrfs_raid_bio *freeit = NULL;
4ae10b3a 675 struct btrfs_raid_bio *cache_drop = NULL;
53b381b3 676 int ret = 0;
53b381b3
DW
677
678 spin_lock_irqsave(&h->lock, flags);
679 list_for_each_entry(cur, &h->hash_list, hash_list) {
8e5cfb55 680 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
53b381b3
DW
681 spin_lock(&cur->bio_list_lock);
682
4ae10b3a
CM
683 /* can we steal this cached rbio's pages? */
684 if (bio_list_empty(&cur->bio_list) &&
685 list_empty(&cur->plug_list) &&
686 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
687 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
688 list_del_init(&cur->hash_list);
dec95574 689 refcount_dec(&cur->refs);
4ae10b3a
CM
690
691 steal_rbio(cur, rbio);
692 cache_drop = cur;
693 spin_unlock(&cur->bio_list_lock);
694
695 goto lockit;
696 }
697
53b381b3
DW
698 /* can we merge into the lock owner? */
699 if (rbio_can_merge(cur, rbio)) {
700 merge_rbio(cur, rbio);
701 spin_unlock(&cur->bio_list_lock);
702 freeit = rbio;
703 ret = 1;
704 goto out;
705 }
706
4ae10b3a 707
53b381b3
DW
708 /*
709 * we couldn't merge with the running
710 * rbio, see if we can merge with the
711 * pending ones. We don't have to
712 * check for rmw_locked because there
713 * is no way they are inside finish_rmw
714 * right now
715 */
716 list_for_each_entry(pending, &cur->plug_list,
717 plug_list) {
718 if (rbio_can_merge(pending, rbio)) {
719 merge_rbio(pending, rbio);
720 spin_unlock(&cur->bio_list_lock);
721 freeit = rbio;
722 ret = 1;
723 goto out;
724 }
725 }
726
727 /* no merging, put us on the tail of the plug list,
 728 * our rbio will be started when the currently
729 * running rbio unlocks
730 */
731 list_add_tail(&rbio->plug_list, &cur->plug_list);
732 spin_unlock(&cur->bio_list_lock);
733 ret = 1;
734 goto out;
735 }
736 }
4ae10b3a 737lockit:
dec95574 738 refcount_inc(&rbio->refs);
53b381b3
DW
739 list_add(&rbio->hash_list, &h->hash_list);
740out:
741 spin_unlock_irqrestore(&h->lock, flags);
4ae10b3a
CM
742 if (cache_drop)
743 remove_rbio_from_cache(cache_drop);
53b381b3
DW
744 if (freeit)
745 __free_raid_bio(freeit);
746 return ret;
747}
748
749/*
750 * called as rmw or parity rebuild is completed. If the plug list has more
751 * rbios waiting for this stripe, the next one on the list will be started
752 */
753static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
754{
755 int bucket;
756 struct btrfs_stripe_hash *h;
757 unsigned long flags;
4ae10b3a 758 int keep_cache = 0;
53b381b3
DW
759
760 bucket = rbio_bucket(rbio);
761 h = rbio->fs_info->stripe_hash_table->table + bucket;
762
4ae10b3a
CM
763 if (list_empty(&rbio->plug_list))
764 cache_rbio(rbio);
765
53b381b3
DW
766 spin_lock_irqsave(&h->lock, flags);
767 spin_lock(&rbio->bio_list_lock);
768
769 if (!list_empty(&rbio->hash_list)) {
4ae10b3a
CM
770 /*
771 * if we're still cached and there is no other IO
772 * to perform, just leave this rbio here for others
773 * to steal from later
774 */
775 if (list_empty(&rbio->plug_list) &&
776 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
777 keep_cache = 1;
778 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
779 BUG_ON(!bio_list_empty(&rbio->bio_list));
780 goto done;
781 }
53b381b3
DW
782
783 list_del_init(&rbio->hash_list);
dec95574 784 refcount_dec(&rbio->refs);
53b381b3
DW
785
786 /*
787 * we use the plug list to hold all the rbios
788 * waiting for the chance to lock this stripe.
789 * hand the lock over to one of them.
790 */
791 if (!list_empty(&rbio->plug_list)) {
792 struct btrfs_raid_bio *next;
793 struct list_head *head = rbio->plug_list.next;
794
795 next = list_entry(head, struct btrfs_raid_bio,
796 plug_list);
797
798 list_del_init(&rbio->plug_list);
799
800 list_add(&next->hash_list, &h->hash_list);
dec95574 801 refcount_inc(&next->refs);
53b381b3
DW
802 spin_unlock(&rbio->bio_list_lock);
803 spin_unlock_irqrestore(&h->lock, flags);
804
1b94b556 805 if (next->operation == BTRFS_RBIO_READ_REBUILD)
53b381b3 806 async_read_rebuild(next);
b4ee1782
OS
807 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
808 steal_rbio(rbio, next);
809 async_read_rebuild(next);
810 } else if (next->operation == BTRFS_RBIO_WRITE) {
4ae10b3a 811 steal_rbio(rbio, next);
53b381b3 812 async_rmw_stripe(next);
5a6ac9ea
MX
813 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
814 steal_rbio(rbio, next);
815 async_scrub_parity(next);
4ae10b3a 816 }
53b381b3
DW
817
818 goto done_nolock;
33a9eca7
DS
819 /*
820 * The barrier for this waitqueue_active is not needed,
821 * we're protected by h->lock and can't miss a wakeup.
822 */
823 } else if (waitqueue_active(&h->wait)) {
53b381b3
DW
824 spin_unlock(&rbio->bio_list_lock);
825 spin_unlock_irqrestore(&h->lock, flags);
826 wake_up(&h->wait);
827 goto done_nolock;
828 }
829 }
4ae10b3a 830done:
53b381b3
DW
831 spin_unlock(&rbio->bio_list_lock);
832 spin_unlock_irqrestore(&h->lock, flags);
833
834done_nolock:
4ae10b3a
CM
835 if (!keep_cache)
836 remove_rbio_from_cache(rbio);
53b381b3
DW
837}
838
839static void __free_raid_bio(struct btrfs_raid_bio *rbio)
840{
841 int i;
842
dec95574 843 if (!refcount_dec_and_test(&rbio->refs))
53b381b3
DW
844 return;
845
4ae10b3a 846 WARN_ON(!list_empty(&rbio->stripe_cache));
53b381b3
DW
847 WARN_ON(!list_empty(&rbio->hash_list));
848 WARN_ON(!bio_list_empty(&rbio->bio_list));
849
850 for (i = 0; i < rbio->nr_pages; i++) {
851 if (rbio->stripe_pages[i]) {
852 __free_page(rbio->stripe_pages[i]);
853 rbio->stripe_pages[i] = NULL;
854 }
855 }
af8e2d1d 856
6e9606d2 857 btrfs_put_bbio(rbio->bbio);
53b381b3
DW
858 kfree(rbio);
859}
860
861static void free_raid_bio(struct btrfs_raid_bio *rbio)
862{
863 unlock_stripe(rbio);
864 __free_raid_bio(rbio);
865}
866
867/*
868 * this frees the rbio and runs through all the bios in the
869 * bio_list and calls end_io on them
870 */
4e4cbee9 871static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
53b381b3
DW
872{
873 struct bio *cur = bio_list_get(&rbio->bio_list);
874 struct bio *next;
4245215d
MX
875
876 if (rbio->generic_bio_cnt)
877 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
878
53b381b3
DW
879 free_raid_bio(rbio);
880
881 while (cur) {
882 next = cur->bi_next;
883 cur->bi_next = NULL;
4e4cbee9 884 cur->bi_status = err;
4246a0b6 885 bio_endio(cur);
53b381b3
DW
886 cur = next;
887 }
888}
889
890/*
891 * end io function used by finish_rmw. When we finally
892 * get here, we've written a full stripe
893 */
4246a0b6 894static void raid_write_end_io(struct bio *bio)
53b381b3
DW
895{
896 struct btrfs_raid_bio *rbio = bio->bi_private;
4e4cbee9 897 blk_status_t err = bio->bi_status;
a6111d11 898 int max_errors;
53b381b3
DW
899
900 if (err)
901 fail_bio_stripe(rbio, bio);
902
903 bio_put(bio);
904
b89e1b01 905 if (!atomic_dec_and_test(&rbio->stripes_pending))
53b381b3
DW
906 return;
907
58efbc9f 908 err = BLK_STS_OK;
53b381b3
DW
909
910 /* OK, we have read all the stripes we need to. */
a6111d11
ZL
911 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
912 0 : rbio->bbio->max_errors;
913 if (atomic_read(&rbio->error) > max_errors)
4e4cbee9 914 err = BLK_STS_IOERR;
53b381b3 915
4246a0b6 916 rbio_orig_end_io(rbio, err);
53b381b3
DW
917}
918
919/*
920 * the read/modify/write code wants to use the original bio for
921 * any pages it included, and then use the rbio for everything
922 * else. This function decides if a given index (stripe number)
923 * and page number in that stripe fall inside the original bio
924 * or the rbio.
925 *
926 * if you set bio_list_only, you'll get a NULL back for any ranges
927 * that are outside the bio_list
928 *
929 * This doesn't take any refs on anything, you get a bare page pointer
930 * and the caller must bump refs as required.
931 *
932 * You must call index_rbio_pages once before you can trust
933 * the answers from this function.
934 */
935static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
936 int index, int pagenr, int bio_list_only)
937{
938 int chunk_page;
939 struct page *p = NULL;
940
941 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
942
943 spin_lock_irq(&rbio->bio_list_lock);
944 p = rbio->bio_pages[chunk_page];
945 spin_unlock_irq(&rbio->bio_list_lock);
946
947 if (p || bio_list_only)
948 return p;
949
950 return rbio->stripe_pages[chunk_page];
951}
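
/*
 * Index math example (annotation added for this copy): with a 64K
 * stripe_len and 4K pages, each stripe contributes 16 page slots, so
 * asking for stripe 2, page 3 looks at slot 2 * 16 + 3 = 35 in either
 * bio_pages[] or stripe_pages[].
 */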
952
953/*
954 * number of pages we need for the entire stripe across all the
955 * drives
956 */
957static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
958{
09cbfeaf 959 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
53b381b3
DW
960}
961
962/*
 963 * allocation and initial setup for the btrfs_raid_bio. Note that
 964 * this does not allocate any pages for rbio->stripe_pages.
965 */
2ff7e61e
JM
966static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
967 struct btrfs_bio *bbio,
968 u64 stripe_len)
53b381b3
DW
969{
970 struct btrfs_raid_bio *rbio;
971 int nr_data = 0;
2c8cdd6e
MX
972 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
973 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
5a6ac9ea 974 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
53b381b3
DW
975 void *p;
976
5a6ac9ea 977 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
bfca9a6d
ZL
978 DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
979 sizeof(long), GFP_NOFS);
af8e2d1d 980 if (!rbio)
53b381b3 981 return ERR_PTR(-ENOMEM);
53b381b3
DW
982
983 bio_list_init(&rbio->bio_list);
984 INIT_LIST_HEAD(&rbio->plug_list);
985 spin_lock_init(&rbio->bio_list_lock);
4ae10b3a 986 INIT_LIST_HEAD(&rbio->stripe_cache);
53b381b3
DW
987 INIT_LIST_HEAD(&rbio->hash_list);
988 rbio->bbio = bbio;
2ff7e61e 989 rbio->fs_info = fs_info;
53b381b3
DW
990 rbio->stripe_len = stripe_len;
991 rbio->nr_pages = num_pages;
2c8cdd6e 992 rbio->real_stripes = real_stripes;
5a6ac9ea 993 rbio->stripe_npages = stripe_npages;
53b381b3
DW
994 rbio->faila = -1;
995 rbio->failb = -1;
dec95574 996 refcount_set(&rbio->refs, 1);
b89e1b01
MX
997 atomic_set(&rbio->error, 0);
998 atomic_set(&rbio->stripes_pending, 0);
53b381b3
DW
999
1000 /*
1001 * the stripe_pages and bio_pages array point to the extra
1002 * memory we allocated past the end of the rbio
1003 */
1004 p = rbio + 1;
1005 rbio->stripe_pages = p;
1006 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
5a6ac9ea 1007 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
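
	/*
	 * Layout sketch (annotation added for this copy): the single
	 * kzalloc() above packs everything into one allocation, carved
	 * up as
	 *
	 *	[struct btrfs_raid_bio]
	 *	[stripe_pages: num_pages  struct page * slots]
	 *	[bio_pages:    num_pages  struct page * slots]
	 *	[dbitmap: DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) longs]
	 */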
53b381b3 1008
10f11900
ZL
1009 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1010 nr_data = real_stripes - 1;
1011 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
2c8cdd6e 1012 nr_data = real_stripes - 2;
53b381b3 1013 else
10f11900 1014 BUG();
53b381b3
DW
1015
1016 rbio->nr_data = nr_data;
1017 return rbio;
1018}
1019
1020/* allocate pages for all the stripes in the bio, including parity */
1021static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1022{
1023 int i;
1024 struct page *page;
1025
1026 for (i = 0; i < rbio->nr_pages; i++) {
1027 if (rbio->stripe_pages[i])
1028 continue;
1029 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1030 if (!page)
1031 return -ENOMEM;
1032 rbio->stripe_pages[i] = page;
53b381b3
DW
1033 }
1034 return 0;
1035}
1036
b7178a5f 1037/* only allocate pages for p/q stripes */
53b381b3
DW
1038static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1039{
1040 int i;
1041 struct page *page;
1042
b7178a5f 1043 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
53b381b3
DW
1044
1045 for (; i < rbio->nr_pages; i++) {
1046 if (rbio->stripe_pages[i])
1047 continue;
1048 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1049 if (!page)
1050 return -ENOMEM;
1051 rbio->stripe_pages[i] = page;
1052 }
1053 return 0;
1054}
1055
1056/*
1057 * add a single page from a specific stripe into our list of bios for IO
1058 * this will try to merge into existing bios if possible, and returns
1059 * zero if all went well.
1060 */
48a3b636
ES
1061static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1062 struct bio_list *bio_list,
1063 struct page *page,
1064 int stripe_nr,
1065 unsigned long page_index,
1066 unsigned long bio_max_len)
53b381b3
DW
1067{
1068 struct bio *last = bio_list->tail;
1069 u64 last_end = 0;
1070 int ret;
1071 struct bio *bio;
1072 struct btrfs_bio_stripe *stripe;
1073 u64 disk_start;
1074
1075 stripe = &rbio->bbio->stripes[stripe_nr];
09cbfeaf 1076 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
53b381b3
DW
1077
1078 /* if the device is missing, just fail this stripe */
1079 if (!stripe->dev->bdev)
1080 return fail_rbio_index(rbio, stripe_nr);
1081
1082 /* see if we can add this page onto our existing bio */
1083 if (last) {
4f024f37
KO
1084 last_end = (u64)last->bi_iter.bi_sector << 9;
1085 last_end += last->bi_iter.bi_size;
53b381b3
DW
1086
1087 /*
1088 * we can't merge these if they are from different
1089 * devices or if they are not contiguous
1090 */
1091 if (last_end == disk_start && stripe->dev->bdev &&
4e4cbee9 1092 !last->bi_status &&
53b381b3 1093 last->bi_bdev == stripe->dev->bdev) {
09cbfeaf
KS
1094 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1095 if (ret == PAGE_SIZE)
53b381b3
DW
1096 return 0;
1097 }
1098 }
1099
1100 /* put a new bio on the list */
c5e4c3d7 1101 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
4f024f37 1102 bio->bi_iter.bi_size = 0;
53b381b3 1103 bio->bi_bdev = stripe->dev->bdev;
4f024f37 1104 bio->bi_iter.bi_sector = disk_start >> 9;
53b381b3 1105
09cbfeaf 1106 bio_add_page(bio, page, PAGE_SIZE, 0);
53b381b3
DW
1107 bio_list_add(bio_list, bio);
1108 return 0;
1109}
1110
1111/*
1112 * while we're doing the read/modify/write cycle, we could
1113 * have errors in reading pages off the disk. This checks
1114 * for errors and if we're not able to read the page it'll
1115 * trigger parity reconstruction. The rmw will be finished
1116 * after we've reconstructed the failed stripes
1117 */
1118static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1119{
1120 if (rbio->faila >= 0 || rbio->failb >= 0) {
2c8cdd6e 1121 BUG_ON(rbio->faila == rbio->real_stripes - 1);
53b381b3
DW
1122 __raid56_parity_recover(rbio);
1123 } else {
1124 finish_rmw(rbio);
1125 }
1126}
1127
53b381b3
DW
1128/*
1129 * helper function to walk our bio list and populate the bio_pages array with
1130 * the result. This seems expensive, but it is faster than constantly
1131 * searching through the bio list as we set up the IO in finish_rmw or stripe
1132 * reconstruction.
1133 *
1134 * This must be called before you trust the answers from page_in_rbio
1135 */
1136static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1137{
1138 struct bio *bio;
1139 u64 start;
1140 unsigned long stripe_offset;
1141 unsigned long page_index;
53b381b3
DW
1142
1143 spin_lock_irq(&rbio->bio_list_lock);
1144 bio_list_for_each(bio, &rbio->bio_list) {
6592e58c
FM
1145 struct bio_vec bvec;
1146 struct bvec_iter iter;
1147 int i = 0;
1148
4f024f37 1149 start = (u64)bio->bi_iter.bi_sector << 9;
8e5cfb55 1150 stripe_offset = start - rbio->bbio->raid_map[0];
09cbfeaf 1151 page_index = stripe_offset >> PAGE_SHIFT;
53b381b3 1152
6592e58c
FM
1153 if (bio_flagged(bio, BIO_CLONED))
1154 bio->bi_iter = btrfs_io_bio(bio)->iter;
1155
1156 bio_for_each_segment(bvec, bio, iter) {
1157 rbio->bio_pages[page_index + i] = bvec.bv_page;
1158 i++;
1159 }
53b381b3
DW
1160 }
1161 spin_unlock_irq(&rbio->bio_list_lock);
1162}
1163
1164/*
1165 * this is called from one of two situations. We either
1166 * have a full stripe from the higher layers, or we've read all
1167 * the missing bits off disk.
1168 *
1169 * This will calculate the parity and then send down any
1170 * changed blocks.
1171 */
1172static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1173{
1174 struct btrfs_bio *bbio = rbio->bbio;
2c8cdd6e 1175 void *pointers[rbio->real_stripes];
53b381b3
DW
1176 int nr_data = rbio->nr_data;
1177 int stripe;
1178 int pagenr;
1179 int p_stripe = -1;
1180 int q_stripe = -1;
1181 struct bio_list bio_list;
1182 struct bio *bio;
53b381b3
DW
1183 int ret;
1184
1185 bio_list_init(&bio_list);
1186
2c8cdd6e
MX
1187 if (rbio->real_stripes - rbio->nr_data == 1) {
1188 p_stripe = rbio->real_stripes - 1;
1189 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1190 p_stripe = rbio->real_stripes - 2;
1191 q_stripe = rbio->real_stripes - 1;
53b381b3
DW
1192 } else {
1193 BUG();
1194 }
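
	/*
	 * Example (annotation added for this copy): on a 4-device raid6
	 * full stripe, real_stripes == 4 and nr_data == 2, so the parity
	 * lands in stripe index 2 (p_stripe) and the Q syndrome in index
	 * 3 (q_stripe); a raid5 stripe only ever sets p_stripe.
	 */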
1195
1196 /* at this point we either have a full stripe,
1197 * or we've read the full stripe from the drive.
1198 * recalculate the parity and write the new results.
1199 *
1200 * We're not allowed to add any new bios to the
1201 * bio list here, anyone else that wants to
1202 * change this stripe needs to do their own rmw.
1203 */
1204 spin_lock_irq(&rbio->bio_list_lock);
1205 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1206 spin_unlock_irq(&rbio->bio_list_lock);
1207
b89e1b01 1208 atomic_set(&rbio->error, 0);
53b381b3
DW
1209
1210 /*
1211 * now that we've set rmw_locked, run through the
1212 * bio list one last time and map the page pointers
4ae10b3a
CM
1213 *
1214 * We don't cache full rbios because we're assuming
1215 * the higher layers are unlikely to use this area of
1216 * the disk again soon. If they do use it again,
1217 * hopefully they will send another full bio.
53b381b3
DW
1218 */
1219 index_rbio_pages(rbio);
4ae10b3a
CM
1220 if (!rbio_is_full(rbio))
1221 cache_rbio_pages(rbio);
1222 else
1223 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
53b381b3 1224
915e2290 1225 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
53b381b3
DW
1226 struct page *p;
1227 /* first collect one page from each data stripe */
1228 for (stripe = 0; stripe < nr_data; stripe++) {
1229 p = page_in_rbio(rbio, stripe, pagenr, 0);
1230 pointers[stripe] = kmap(p);
1231 }
1232
1233 /* then add the parity stripe */
1234 p = rbio_pstripe_page(rbio, pagenr);
1235 SetPageUptodate(p);
1236 pointers[stripe++] = kmap(p);
1237
1238 if (q_stripe != -1) {
1239
1240 /*
1241 * raid6, add the qstripe and call the
1242 * library function to fill in our p/q
1243 */
1244 p = rbio_qstripe_page(rbio, pagenr);
1245 SetPageUptodate(p);
1246 pointers[stripe++] = kmap(p);
1247
2c8cdd6e 1248 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
53b381b3
DW
1249 pointers);
1250 } else {
1251 /* raid5 */
1252 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
09cbfeaf 1253 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
53b381b3
DW
1254 }
1255
1256
2c8cdd6e 1257 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
53b381b3
DW
1258 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1259 }
1260
1261 /*
1262 * time to start writing. Make bios for everything from the
1263 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1264 * everything else.
1265 */
2c8cdd6e 1266 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
915e2290 1267 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
53b381b3
DW
1268 struct page *page;
1269 if (stripe < rbio->nr_data) {
1270 page = page_in_rbio(rbio, stripe, pagenr, 1);
1271 if (!page)
1272 continue;
1273 } else {
1274 page = rbio_stripe_page(rbio, stripe, pagenr);
1275 }
1276
1277 ret = rbio_add_io_page(rbio, &bio_list,
1278 page, stripe, pagenr, rbio->stripe_len);
1279 if (ret)
1280 goto cleanup;
1281 }
1282 }
1283
2c8cdd6e
MX
1284 if (likely(!bbio->num_tgtdevs))
1285 goto write_data;
1286
1287 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1288 if (!bbio->tgtdev_map[stripe])
1289 continue;
1290
915e2290 1291 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2c8cdd6e
MX
1292 struct page *page;
1293 if (stripe < rbio->nr_data) {
1294 page = page_in_rbio(rbio, stripe, pagenr, 1);
1295 if (!page)
1296 continue;
1297 } else {
1298 page = rbio_stripe_page(rbio, stripe, pagenr);
1299 }
1300
1301 ret = rbio_add_io_page(rbio, &bio_list, page,
1302 rbio->bbio->tgtdev_map[stripe],
1303 pagenr, rbio->stripe_len);
1304 if (ret)
1305 goto cleanup;
1306 }
1307 }
1308
1309write_data:
b89e1b01
MX
1310 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1311 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
53b381b3
DW
1312
1313 while (1) {
1314 bio = bio_list_pop(&bio_list);
1315 if (!bio)
1316 break;
1317
1318 bio->bi_private = rbio;
1319 bio->bi_end_io = raid_write_end_io;
37226b21 1320 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
4e49ea4a
MC
1321
1322 submit_bio(bio);
53b381b3
DW
1323 }
1324 return;
1325
1326cleanup:
58efbc9f 1327 rbio_orig_end_io(rbio, BLK_STS_IOERR);
53b381b3
DW
1328}
1329
1330/*
1331 * helper to find the stripe number for a given bio. Used to figure out which
1332 * stripe has failed. This expects the bio to correspond to a physical disk,
1333 * so it looks up based on physical sector numbers.
1334 */
1335static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1336 struct bio *bio)
1337{
4f024f37 1338 u64 physical = bio->bi_iter.bi_sector;
53b381b3
DW
1339 u64 stripe_start;
1340 int i;
1341 struct btrfs_bio_stripe *stripe;
1342
1343 physical <<= 9;
1344
1345 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1346 stripe = &rbio->bbio->stripes[i];
1347 stripe_start = stripe->physical;
1348 if (physical >= stripe_start &&
2c8cdd6e
MX
1349 physical < stripe_start + rbio->stripe_len &&
1350 bio->bi_bdev == stripe->dev->bdev) {
53b381b3
DW
1351 return i;
1352 }
1353 }
1354 return -1;
1355}
1356
1357/*
1358 * helper to find the stripe number for a given
1359 * bio (before mapping). Used to figure out which stripe has
1360 * failed. This looks up based on logical block numbers.
1361 */
1362static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1363 struct bio *bio)
1364{
4f024f37 1365 u64 logical = bio->bi_iter.bi_sector;
53b381b3
DW
1366 u64 stripe_start;
1367 int i;
1368
1369 logical <<= 9;
1370
1371 for (i = 0; i < rbio->nr_data; i++) {
8e5cfb55 1372 stripe_start = rbio->bbio->raid_map[i];
53b381b3
DW
1373 if (logical >= stripe_start &&
1374 logical < stripe_start + rbio->stripe_len) {
1375 return i;
1376 }
1377 }
1378 return -1;
1379}
1380
1381/*
1382 * returns -EIO if we had too many failures
1383 */
1384static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1385{
1386 unsigned long flags;
1387 int ret = 0;
1388
1389 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1390
1391 /* we already know this stripe is bad, move on */
1392 if (rbio->faila == failed || rbio->failb == failed)
1393 goto out;
1394
1395 if (rbio->faila == -1) {
1396 /* first failure on this rbio */
1397 rbio->faila = failed;
b89e1b01 1398 atomic_inc(&rbio->error);
53b381b3
DW
1399 } else if (rbio->failb == -1) {
1400 /* second failure on this rbio */
1401 rbio->failb = failed;
b89e1b01 1402 atomic_inc(&rbio->error);
53b381b3
DW
1403 } else {
1404 ret = -EIO;
1405 }
1406out:
1407 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1408
1409 return ret;
1410}
1411
1412/*
1413 * helper to fail a stripe based on a physical disk
1414 * bio.
1415 */
1416static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1417 struct bio *bio)
1418{
1419 int failed = find_bio_stripe(rbio, bio);
1420
1421 if (failed < 0)
1422 return -EIO;
1423
1424 return fail_rbio_index(rbio, failed);
1425}
1426
1427/*
1428 * this sets each page in the bio uptodate. It should only be used on private
1429 * rbio pages, nothing that comes in from the higher layers
1430 */
1431static void set_bio_pages_uptodate(struct bio *bio)
1432{
6592e58c
FM
1433 struct bio_vec bvec;
1434 struct bvec_iter iter;
1435
1436 if (bio_flagged(bio, BIO_CLONED))
1437 bio->bi_iter = btrfs_io_bio(bio)->iter;
53b381b3 1438
6592e58c
FM
1439 bio_for_each_segment(bvec, bio, iter)
1440 SetPageUptodate(bvec.bv_page);
53b381b3
DW
1441}
1442
1443/*
1444 * end io for the read phase of the rmw cycle. All the bios here are physical
1445 * stripe bios we've read from the disk so we can recalculate the parity of the
1446 * stripe.
1447 *
1448 * This will usually kick off finish_rmw once all the bios are read in, but it
1449 * may trigger parity reconstruction if we had any errors along the way
1450 */
4246a0b6 1451static void raid_rmw_end_io(struct bio *bio)
53b381b3
DW
1452{
1453 struct btrfs_raid_bio *rbio = bio->bi_private;
1454
4e4cbee9 1455 if (bio->bi_status)
53b381b3
DW
1456 fail_bio_stripe(rbio, bio);
1457 else
1458 set_bio_pages_uptodate(bio);
1459
1460 bio_put(bio);
1461
b89e1b01 1462 if (!atomic_dec_and_test(&rbio->stripes_pending))
53b381b3
DW
1463 return;
1464
b89e1b01 1465 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
53b381b3
DW
1466 goto cleanup;
1467
1468 /*
1469 * this will normally call finish_rmw to start our write
1470 * but if there are any failed stripes we'll reconstruct
1471 * from parity first
1472 */
1473 validate_rbio_for_rmw(rbio);
1474 return;
1475
1476cleanup:
1477
58efbc9f 1478 rbio_orig_end_io(rbio, BLK_STS_IOERR);
53b381b3
DW
1479}
1480
1481static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
1482{
0b246afa
JM
1483 btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
1484 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
53b381b3
DW
1485}
1486
1487static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1488{
9e0af237
LB
1489 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
1490 read_rebuild_work, NULL, NULL);
53b381b3 1491
0b246afa 1492 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
53b381b3
DW
1493}
1494
1495/*
1496 * the stripe must be locked by the caller. It will
1497 * unlock after all the writes are done
1498 */
1499static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1500{
1501 int bios_to_read = 0;
53b381b3
DW
1502 struct bio_list bio_list;
1503 int ret;
53b381b3
DW
1504 int pagenr;
1505 int stripe;
1506 struct bio *bio;
1507
1508 bio_list_init(&bio_list);
1509
1510 ret = alloc_rbio_pages(rbio);
1511 if (ret)
1512 goto cleanup;
1513
1514 index_rbio_pages(rbio);
1515
b89e1b01 1516 atomic_set(&rbio->error, 0);
53b381b3
DW
1517 /*
1518 * build a list of bios to read all the missing parts of this
1519 * stripe
1520 */
1521 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
915e2290 1522 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
53b381b3
DW
1523 struct page *page;
1524 /*
1525 * we want to find all the pages missing from
1526 * the rbio and read them from the disk. If
1527 * page_in_rbio finds a page in the bio list
1528 * we don't need to read it off the stripe.
1529 */
1530 page = page_in_rbio(rbio, stripe, pagenr, 1);
1531 if (page)
1532 continue;
1533
1534 page = rbio_stripe_page(rbio, stripe, pagenr);
4ae10b3a
CM
1535 /*
1536 * the bio cache may have handed us an uptodate
1537 * page. If so, be happy and use it
1538 */
1539 if (PageUptodate(page))
1540 continue;
1541
53b381b3
DW
1542 ret = rbio_add_io_page(rbio, &bio_list, page,
1543 stripe, pagenr, rbio->stripe_len);
1544 if (ret)
1545 goto cleanup;
1546 }
1547 }
1548
1549 bios_to_read = bio_list_size(&bio_list);
1550 if (!bios_to_read) {
1551 /*
1552 * this can happen if others have merged with
1553 * us, it means there is nothing left to read.
1554 * But if there are missing devices it may not be
1555 * safe to do the full stripe write yet.
1556 */
1557 goto finish;
1558 }
1559
1560 /*
1561 * the bbio may be freed once we submit the last bio. Make sure
1562 * not to touch it after that
1563 */
b89e1b01 1564 atomic_set(&rbio->stripes_pending, bios_to_read);
53b381b3
DW
1565 while (1) {
1566 bio = bio_list_pop(&bio_list);
1567 if (!bio)
1568 break;
1569
1570 bio->bi_private = rbio;
1571 bio->bi_end_io = raid_rmw_end_io;
37226b21 1572 bio_set_op_attrs(bio, REQ_OP_READ, 0);
53b381b3 1573
0b246afa 1574 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
53b381b3 1575
4e49ea4a 1576 submit_bio(bio);
53b381b3
DW
1577 }
1578 /* the actual write will happen once the reads are done */
1579 return 0;
1580
1581cleanup:
58efbc9f 1582 rbio_orig_end_io(rbio, BLK_STS_IOERR);
53b381b3
DW
1583 return -EIO;
1584
1585finish:
1586 validate_rbio_for_rmw(rbio);
1587 return 0;
1588}
1589
1590/*
1591 * if the upper layers pass in a full stripe, we thank them by only allocating
1592 * enough pages to hold the parity, and sending it all down quickly.
1593 */
1594static int full_stripe_write(struct btrfs_raid_bio *rbio)
1595{
1596 int ret;
1597
1598 ret = alloc_rbio_parity_pages(rbio);
3cd846d1
MX
1599 if (ret) {
1600 __free_raid_bio(rbio);
53b381b3 1601 return ret;
3cd846d1 1602 }
53b381b3
DW
1603
1604 ret = lock_stripe_add(rbio);
1605 if (ret == 0)
1606 finish_rmw(rbio);
1607 return 0;
1608}
1609
1610/*
1611 * partial stripe writes get handed over to async helpers.
1612 * We're really hoping to merge a few more writes into this
1613 * rbio before calculating new parity
1614 */
1615static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1616{
1617 int ret;
1618
1619 ret = lock_stripe_add(rbio);
1620 if (ret == 0)
1621 async_rmw_stripe(rbio);
1622 return 0;
1623}
1624
1625/*
1626 * sometimes while we were reading from the drive to
1627 * recalculate parity, enough new bios come in to create
1628 * a full stripe. So we do a check here to see if we can
1629 * go directly to finish_rmw
1630 */
1631static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1632{
1633 /* head off into rmw land if we don't have a full stripe */
1634 if (!rbio_is_full(rbio))
1635 return partial_stripe_write(rbio);
1636 return full_stripe_write(rbio);
1637}
1638
6ac0f488
CM
1639/*
1640 * We use plugging call backs to collect full stripes.
1641 * Any time we get a partial stripe write while plugged
1642 * we collect it into a list. When the unplug comes down,
1643 * we sort the list by logical block number and merge
1644 * everything we can into the same rbios
1645 */
1646struct btrfs_plug_cb {
1647 struct blk_plug_cb cb;
1648 struct btrfs_fs_info *info;
1649 struct list_head rbio_list;
1650 struct btrfs_work work;
1651};
1652
1653/*
1654 * rbios on the plug list are sorted for easier merging.
1655 */
1656static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1657{
1658 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1659 plug_list);
1660 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1661 plug_list);
4f024f37
KO
1662 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1663 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
6ac0f488
CM
1664
1665 if (a_sector < b_sector)
1666 return -1;
1667 if (a_sector > b_sector)
1668 return 1;
1669 return 0;
1670}
1671
1672static void run_plug(struct btrfs_plug_cb *plug)
1673{
1674 struct btrfs_raid_bio *cur;
1675 struct btrfs_raid_bio *last = NULL;
1676
1677 /*
1678 * sort our plug list then try to merge
1679 * everything we can in hopes of creating full
1680 * stripes.
1681 */
1682 list_sort(NULL, &plug->rbio_list, plug_cmp);
1683 while (!list_empty(&plug->rbio_list)) {
1684 cur = list_entry(plug->rbio_list.next,
1685 struct btrfs_raid_bio, plug_list);
1686 list_del_init(&cur->plug_list);
1687
1688 if (rbio_is_full(cur)) {
1689 /* we have a full stripe, send it down */
1690 full_stripe_write(cur);
1691 continue;
1692 }
1693 if (last) {
1694 if (rbio_can_merge(last, cur)) {
1695 merge_rbio(last, cur);
1696 __free_raid_bio(cur);
1697 continue;
1698
1699 }
1700 __raid56_parity_write(last);
1701 }
1702 last = cur;
1703 }
1704 if (last) {
1705 __raid56_parity_write(last);
1706 }
1707 kfree(plug);
1708}
1709
1710/*
1711 * if the unplug comes from schedule, we have to push the
1712 * work off to a helper thread
1713 */
1714static void unplug_work(struct btrfs_work *work)
1715{
1716 struct btrfs_plug_cb *plug;
1717 plug = container_of(work, struct btrfs_plug_cb, work);
1718 run_plug(plug);
1719}
1720
1721static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1722{
1723 struct btrfs_plug_cb *plug;
1724 plug = container_of(cb, struct btrfs_plug_cb, cb);
1725
1726 if (from_schedule) {
9e0af237
LB
1727 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1728 unplug_work, NULL, NULL);
d05a33ac
QW
1729 btrfs_queue_work(plug->info->rmw_workers,
1730 &plug->work);
6ac0f488
CM
1731 return;
1732 }
1733 run_plug(plug);
1734}
1735
53b381b3
DW
1736/*
1737 * our main entry point for writes from the rest of the FS.
1738 */
2ff7e61e 1739int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
8e5cfb55 1740 struct btrfs_bio *bbio, u64 stripe_len)
53b381b3
DW
1741{
1742 struct btrfs_raid_bio *rbio;
6ac0f488
CM
1743 struct btrfs_plug_cb *plug = NULL;
1744 struct blk_plug_cb *cb;
4245215d 1745 int ret;
53b381b3 1746
2ff7e61e 1747 rbio = alloc_rbio(fs_info, bbio, stripe_len);
af8e2d1d 1748 if (IS_ERR(rbio)) {
6e9606d2 1749 btrfs_put_bbio(bbio);
53b381b3 1750 return PTR_ERR(rbio);
af8e2d1d 1751 }
53b381b3 1752 bio_list_add(&rbio->bio_list, bio);
4f024f37 1753 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1b94b556 1754 rbio->operation = BTRFS_RBIO_WRITE;
6ac0f488 1755
0b246afa 1756 btrfs_bio_counter_inc_noblocked(fs_info);
4245215d
MX
1757 rbio->generic_bio_cnt = 1;
1758
6ac0f488
CM
1759 /*
1760 * don't plug on full rbios, just get them out the door
1761 * as quickly as we can
1762 */
4245215d
MX
1763 if (rbio_is_full(rbio)) {
1764 ret = full_stripe_write(rbio);
1765 if (ret)
0b246afa 1766 btrfs_bio_counter_dec(fs_info);
4245215d
MX
1767 return ret;
1768 }
6ac0f488 1769
0b246afa 1770 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
6ac0f488
CM
1771 if (cb) {
1772 plug = container_of(cb, struct btrfs_plug_cb, cb);
1773 if (!plug->info) {
0b246afa 1774 plug->info = fs_info;
6ac0f488
CM
1775 INIT_LIST_HEAD(&plug->rbio_list);
1776 }
1777 list_add_tail(&rbio->plug_list, &plug->rbio_list);
4245215d 1778 ret = 0;
6ac0f488 1779 } else {
4245215d
MX
1780 ret = __raid56_parity_write(rbio);
1781 if (ret)
0b246afa 1782 btrfs_bio_counter_dec(fs_info);
6ac0f488 1783 }
4245215d 1784 return ret;
53b381b3
DW
1785}
1786
1787/*
1788 * all parity reconstruction happens here. We've read in everything
1789 * we can find from the drives and this does the heavy lifting of
1790 * sorting the good from the bad.
1791 */
1792static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1793{
1794 int pagenr, stripe;
1795 void **pointers;
1796 int faila = -1, failb = -1;
53b381b3 1797 struct page *page;
58efbc9f 1798 blk_status_t err;
53b381b3
DW
1799 int i;
1800
31e818fe 1801 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
53b381b3 1802 if (!pointers) {
58efbc9f 1803 err = BLK_STS_RESOURCE;
53b381b3
DW
1804 goto cleanup_io;
1805 }
1806
1807 faila = rbio->faila;
1808 failb = rbio->failb;
1809
b4ee1782
OS
1810 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1811 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
53b381b3
DW
1812 spin_lock_irq(&rbio->bio_list_lock);
1813 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1814 spin_unlock_irq(&rbio->bio_list_lock);
1815 }
1816
1817 index_rbio_pages(rbio);
1818
915e2290 1819 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
5a6ac9ea
MX
1820 /*
1821 * Now we just use the bitmap to mark the horizontal stripes in
1822 * which we have data when doing parity scrub.
1823 */
1824 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1825 !test_bit(pagenr, rbio->dbitmap))
1826 continue;
1827
53b381b3
DW
1828 /* setup our array of pointers with pages
1829 * from each stripe
1830 */
2c8cdd6e 1831 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
53b381b3
DW
1832 /*
1833 * if we're rebuilding a read, we have to use
1834 * pages from the bio list
1835 */
b4ee1782
OS
1836 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1837 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
53b381b3
DW
1838 (stripe == faila || stripe == failb)) {
1839 page = page_in_rbio(rbio, stripe, pagenr, 0);
1840 } else {
1841 page = rbio_stripe_page(rbio, stripe, pagenr);
1842 }
1843 pointers[stripe] = kmap(page);
1844 }
1845
1846 /* all raid6 handling here */
10f11900 1847 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
53b381b3
DW
1848 /*
1849 * single failure, rebuild from parity raid5
1850 * style
1851 */
1852 if (failb < 0) {
1853 if (faila == rbio->nr_data) {
1854 /*
1855 * Just the P stripe has failed, without
1856 * a bad data or Q stripe.
1857 * TODO, we should redo the xor here.
1858 */
58efbc9f 1859 err = BLK_STS_IOERR;
53b381b3
DW
1860 goto cleanup;
1861 }
1862 /*
1863 * a single failure in raid6 is rebuilt
1864 * in the pstripe code below
1865 */
1866 goto pstripe;
1867 }
1868
1869 /* make sure our ps and qs are in order */
1870 if (faila > failb) {
1871 int tmp = failb;
1872 failb = faila;
1873 faila = tmp;
1874 }
1875
1876 /* if the q stripe is failed, do a pstripe reconstruction
1877 * from the xors.
1878 * If both the q stripe and the P stripe are failed, we're
1879 * here due to a crc mismatch and we can't give them the
1880 * data they want
1881 */
8e5cfb55
ZL
1882 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1883 if (rbio->bbio->raid_map[faila] ==
1884 RAID5_P_STRIPE) {
58efbc9f 1885 err = BLK_STS_IOERR;
53b381b3
DW
1886 goto cleanup;
1887 }
1888 /*
1889 * otherwise we have one bad data stripe and
1890 * a good P stripe. raid5!
1891 */
1892 goto pstripe;
1893 }
1894
8e5cfb55 1895 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
2c8cdd6e 1896 raid6_datap_recov(rbio->real_stripes,
53b381b3
DW
1897 PAGE_SIZE, faila, pointers);
1898 } else {
2c8cdd6e 1899 raid6_2data_recov(rbio->real_stripes,
1900 PAGE_SIZE, faila, failb,
1901 pointers);
1902 }
1903 } else {
1904 void *p;
1905
1906 /* rebuild from P stripe here (raid5 or raid6) */
1907 BUG_ON(failb != -1);
1908pstripe:
1909 /* Copy parity block into failed block to start with */
1910 memcpy(pointers[faila],
1911 pointers[rbio->nr_data],
09cbfeaf 1912 PAGE_SIZE);
1913
1914 /* rearrange the pointer array */
1915 p = pointers[faila];
1916 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1917 pointers[stripe] = pointers[stripe + 1];
1918 pointers[rbio->nr_data - 1] = p;
1919
1920 /* xor in the rest */
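			/*
			 * run_xor() (defined earlier in this file) xors the
			 * first nr_data - 1 pointers into the last one.  After
			 * the rotation above, the failed block (currently a
			 * copy of P) sits in that last slot, so with e.g.
			 * three data stripes and faila == 0 this computes
			 * D0 = P ^ D1 ^ D2, i.e. the missing data.
			 */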
09cbfeaf 1921 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
1922 }
1923 /* if we're doing this rebuild as part of an rmw, go through
1924 * and set all of our private rbio pages in the
1925 * failed stripes as uptodate. This way finish_rmw will
1926 * know they can be trusted. If this was a read reconstruction,
1927 * other endio functions will fiddle the uptodate bits
1928 */
1b94b556 1929 if (rbio->operation == BTRFS_RBIO_WRITE) {
915e2290 1930 for (i = 0; i < rbio->stripe_npages; i++) {
1931 if (faila != -1) {
1932 page = rbio_stripe_page(rbio, faila, i);
1933 SetPageUptodate(page);
1934 }
1935 if (failb != -1) {
1936 page = rbio_stripe_page(rbio, failb, i);
1937 SetPageUptodate(page);
1938 }
1939 }
1940 }
2c8cdd6e 1941 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1942 /*
1943 * if we're rebuilding a read, we have to use
1944 * pages from the bio list
1945 */
1946 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1947 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1948 (stripe == faila || stripe == failb)) {
1949 page = page_in_rbio(rbio, stripe, pagenr, 0);
1950 } else {
1951 page = rbio_stripe_page(rbio, stripe, pagenr);
1952 }
1953 kunmap(page);
1954 }
1955 }
1956
58efbc9f 1957 err = BLK_STS_OK;
1958cleanup:
1959 kfree(pointers);
1960
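	/*
	 * For a read rebuild the pages we just reconstructed are worth
	 * caching, since a stripe that needed recovery once is likely to be
	 * read again; for rmw and parity scrub we carry on with the write
	 * phase instead (finish_rmw / finish_parity_scrub below).
	 */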
1961cleanup_io:
1b94b556 1962 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
58efbc9f 1963 if (err == BLK_STS_OK)
1964 cache_rbio_pages(rbio);
1965 else
1966 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1967
22365979 1968 rbio_orig_end_io(rbio, err);
b4ee1782 1969 } else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
4246a0b6 1970 rbio_orig_end_io(rbio, err);
58efbc9f 1971 } else if (err == BLK_STS_OK) {
1972 rbio->faila = -1;
1973 rbio->failb = -1;
1974
1975 if (rbio->operation == BTRFS_RBIO_WRITE)
1976 finish_rmw(rbio);
1977 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
1978 finish_parity_scrub(rbio, 0);
1979 else
1980 BUG();
53b381b3 1981 } else {
4246a0b6 1982 rbio_orig_end_io(rbio, err);
1983 }
1984}
1985
1986/*
1987 * This is called only for stripes we've read from disk to
1988 * reconstruct the parity.
1989 */
4246a0b6 1990static void raid_recover_end_io(struct bio *bio)
1991{
1992 struct btrfs_raid_bio *rbio = bio->bi_private;
1993
1994 /*
1995 * we only read stripe pages off the disk, set them
1996 * up to date if there were no errors
1997 */
4e4cbee9 1998 if (bio->bi_status)
1999 fail_bio_stripe(rbio, bio);
2000 else
2001 set_bio_pages_uptodate(bio);
2002 bio_put(bio);
2003
b89e1b01 2004 if (!atomic_dec_and_test(&rbio->stripes_pending))
2005 return;
2006
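	/*
	 * This was the last outstanding read.  If more stripes failed than
	 * this raid profile can tolerate (bbio->max_errors), give up now;
	 * otherwise run the actual reconstruction.
	 */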
b89e1b01 2007 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
58efbc9f 2008 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2009 else
2010 __raid_recover_end_io(rbio);
2011}
2012
2013/*
2014 * reads everything we need off the disk to reconstruct
2015 * the parity. endio handlers trigger final reconstruction
2016 * when the IO is done.
2017 *
2018 * This is used both for reads from the higher layers and for
 2019 * parity construction required to finish an rmw cycle.
2020 */
2021static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2022{
2023 int bios_to_read = 0;
2024 struct bio_list bio_list;
2025 int ret;
2026 int pagenr;
2027 int stripe;
2028 struct bio *bio;
2029
2030 bio_list_init(&bio_list);
2031
2032 ret = alloc_rbio_pages(rbio);
2033 if (ret)
2034 goto cleanup;
2035
b89e1b01 2036 atomic_set(&rbio->error, 0);
2037
2038 /*
2039 * read everything that hasn't failed. Thanks to the
2040 * stripe cache, it is possible that some or all of these
2041 * pages are going to be uptodate.
53b381b3 2042 */
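	/*
	 * The stripes we already know to be bad are counted into rbio->error
	 * below and skipped; the max_errors checks (here and in the endio
	 * handler) then decide whether recovery is still possible.
	 */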
2c8cdd6e 2043 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
5588383e 2044 if (rbio->faila == stripe || rbio->failb == stripe) {
b89e1b01 2045 atomic_inc(&rbio->error);
53b381b3 2046 continue;
5588383e 2047 }
53b381b3 2048
915e2290 2049 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2050 struct page *p;
2051
2052 /*
2053 * the rmw code may have already read this
2054 * page in
2055 */
2056 p = rbio_stripe_page(rbio, stripe, pagenr);
2057 if (PageUptodate(p))
2058 continue;
2059
2060 ret = rbio_add_io_page(rbio, &bio_list,
2061 rbio_stripe_page(rbio, stripe, pagenr),
2062 stripe, pagenr, rbio->stripe_len);
2063 if (ret < 0)
2064 goto cleanup;
2065 }
2066 }
2067
2068 bios_to_read = bio_list_size(&bio_list);
2069 if (!bios_to_read) {
2070 /*
2071 * we might have no bios to read just because the pages
2072 * were up to date, or we might have no bios to read because
2073 * the devices were gone.
2074 */
b89e1b01 2075 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2076 __raid_recover_end_io(rbio);
2077 goto out;
2078 } else {
2079 goto cleanup;
2080 }
2081 }
2082
2083 /*
2084 * the bbio may be freed once we submit the last bio. Make sure
2085 * not to touch it after that
2086 */
b89e1b01 2087 atomic_set(&rbio->stripes_pending, bios_to_read);
2088 while (1) {
2089 bio = bio_list_pop(&bio_list);
2090 if (!bio)
2091 break;
2092
2093 bio->bi_private = rbio;
2094 bio->bi_end_io = raid_recover_end_io;
37226b21 2095 bio_set_op_attrs(bio, REQ_OP_READ, 0);
53b381b3 2096
0b246afa 2097 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
53b381b3 2098
4e49ea4a 2099 submit_bio(bio);
2100 }
2101out:
2102 return 0;
2103
2104cleanup:
2105 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2106 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
58efbc9f 2107 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2108 return -EIO;
2109}
2110
2111/*
2112 * the main entry point for reads from the higher layers. This
2113 * is really only called when the normal read path had a failure,
2114 * so we assume the bio they send down corresponds to a failed part
2115 * of the drive.
2116 */
2ff7e61e 2117int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2118 struct btrfs_bio *bbio, u64 stripe_len,
2119 int mirror_num, int generic_io)
2120{
2121 struct btrfs_raid_bio *rbio;
2122 int ret;
2123
2124 if (generic_io) {
2125 ASSERT(bbio->mirror_num == mirror_num);
2126 btrfs_io_bio(bio)->mirror_num = mirror_num;
2127 }
2128
2ff7e61e 2129 rbio = alloc_rbio(fs_info, bbio, stripe_len);
af8e2d1d 2130 if (IS_ERR(rbio)) {
2131 if (generic_io)
2132 btrfs_put_bbio(bbio);
53b381b3 2133 return PTR_ERR(rbio);
af8e2d1d 2134 }
53b381b3 2135
1b94b556 2136 rbio->operation = BTRFS_RBIO_READ_REBUILD;
53b381b3 2137 bio_list_add(&rbio->bio_list, bio);
4f024f37 2138 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2139
2140 rbio->faila = find_logical_bio_stripe(rbio, bio);
2141 if (rbio->faila == -1) {
0b246afa 2142 btrfs_warn(fs_info,
2143 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2144 __func__, (u64)bio->bi_iter.bi_sector << 9,
2145 (u64)bio->bi_iter.bi_size, bbio->map_type);
2146 if (generic_io)
2147 btrfs_put_bbio(bbio);
2148 kfree(rbio);
2149 return -EIO;
2150 }
2151
4245215d 2152 if (generic_io) {
0b246afa 2153 btrfs_bio_counter_inc_noblocked(fs_info);
2154 rbio->generic_bio_cnt = 1;
2155 } else {
6e9606d2 2156 btrfs_get_bbio(bbio);
2157 }
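	/*
	 * A generic (top level) read takes its own bio_counter reference
	 * here, dropped when the rbio completes; internal callers such as
	 * scrub already hold one, so we only grab an extra bbio reference.
	 */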
2158
2159 /*
2160 * reconstruct from the q stripe if they are
2161 * asking for mirror 3
2162 */
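	/*
	 * real_stripes - 2 is the P stripe index, so marking it as failed
	 * below forces the recovery code to rebuild the data from Q.
	 */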
2163 if (mirror_num == 3)
2c8cdd6e 2164 rbio->failb = rbio->real_stripes - 2;
2165
2166 ret = lock_stripe_add(rbio);
2167
2168 /*
2169 * __raid56_parity_recover will end the bio with
2170 * any errors it hits. We don't want to return
2171 * its error value up the stack because our caller
2172 * will end up calling bio_endio with any nonzero
2173 * return
2174 */
2175 if (ret == 0)
2176 __raid56_parity_recover(rbio);
2177 /*
 2178 * otherwise our rbio has been added to the list of
 2179 * rbios that will be handled after the
 2180 * current lock owner is done
2181 */
2182 return 0;
2183
2184}
2185
2186static void rmw_work(struct btrfs_work *work)
2187{
2188 struct btrfs_raid_bio *rbio;
2189
2190 rbio = container_of(work, struct btrfs_raid_bio, work);
2191 raid56_rmw_stripe(rbio);
2192}
2193
2194static void read_rebuild_work(struct btrfs_work *work)
2195{
2196 struct btrfs_raid_bio *rbio;
2197
2198 rbio = container_of(work, struct btrfs_raid_bio, work);
2199 __raid56_parity_recover(rbio);
2200}
2201
2202/*
2203 * The following code is used to scrub/replace the parity stripe
2204 *
2205 * Caller must have already increased bio_counter for getting @bbio.
2206 *
 2207 * Note: We must make sure that all the pages added to the scrub/replace
 2208 * raid bio are correct and will not change during the scrub/replace, i.e.
 2209 * they only hold metadata or file data protected by a checksum.
2210 */
2211
2212struct btrfs_raid_bio *
2ff7e61e 2213raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2214 struct btrfs_bio *bbio, u64 stripe_len,
2215 struct btrfs_device *scrub_dev,
2216 unsigned long *dbitmap, int stripe_nsectors)
2217{
2218 struct btrfs_raid_bio *rbio;
2219 int i;
2220
2ff7e61e 2221 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2222 if (IS_ERR(rbio))
2223 return NULL;
2224 bio_list_add(&rbio->bio_list, bio);
2225 /*
2226 * This is a special bio which is used to hold the completion handler
 2227 * and keep the scrub rbio similar to the other rbio types
2228 */
2229 ASSERT(!bio->bi_iter.bi_size);
2230 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2231
2c8cdd6e 2232 for (i = 0; i < rbio->real_stripes; i++) {
2233 if (bbio->stripes[i].dev == scrub_dev) {
2234 rbio->scrubp = i;
2235 break;
2236 }
2237 }
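	/*
	 * rbio->scrubp is the stripe index of the device being scrubbed;
	 * only that stripe's parity pages get checked and rewritten.
	 */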
2238
 2239 /* For now we only support the case where sectorsize equals page size */
0b246afa 2240 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2241 ASSERT(rbio->stripe_npages == stripe_nsectors);
2242 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2243
2244 /*
 2245 * We have already increased bio_counter when getting bbio; record that
 2246 * here so the counter can be dropped at rbio_orig_end_io().
2247 */
2248 rbio->generic_bio_cnt = 1;
2249
2250 return rbio;
2251}
2252
2253/* Used for both parity scrub and missing. */
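/*
 * The caller hands us a page of verified data for @logical; it is slotted
 * into bio_pages[] at the page offset of @logical inside the data portion of
 * the full stripe.  For example (assuming a 64K stripe_len and 4K pages), a
 * logical address of raid_map[0] + 128K lands at index 32, the first page of
 * the third data stripe.
 */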
2254void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2255 u64 logical)
2256{
2257 int stripe_offset;
2258 int index;
2259
2260 ASSERT(logical >= rbio->bbio->raid_map[0]);
2261 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
5a6ac9ea 2262 rbio->stripe_len * rbio->nr_data);
8e5cfb55 2263 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
09cbfeaf 2264 index = stripe_offset >> PAGE_SHIFT;
2265 rbio->bio_pages[index] = page;
2266}
2267
2268/*
 2269 * We only scrub the parity of the horizontal stripes for which we have
 2270 * correct data, so we don't need to allocate pages for every stripe.
2271 */
2272static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2273{
2274 int i;
2275 int bit;
2276 int index;
2277 struct page *page;
2278
2279 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2c8cdd6e 2280 for (i = 0; i < rbio->real_stripes; i++) {
2281 index = i * rbio->stripe_npages + bit;
2282 if (rbio->stripe_pages[index])
2283 continue;
2284
2285 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2286 if (!page)
2287 return -ENOMEM;
2288 rbio->stripe_pages[index] = page;
2289 }
2290 }
2291 return 0;
2292}
2293
2294static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2295 int need_check)
2296{
76035976 2297 struct btrfs_bio *bbio = rbio->bbio;
2c8cdd6e 2298 void *pointers[rbio->real_stripes];
76035976 2299 DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2300 int nr_data = rbio->nr_data;
2301 int stripe;
2302 int pagenr;
2303 int p_stripe = -1;
2304 int q_stripe = -1;
2305 struct page *p_page = NULL;
2306 struct page *q_page = NULL;
2307 struct bio_list bio_list;
2308 struct bio *bio;
76035976 2309 int is_replace = 0;
2310 int ret;
2311
2312 bio_list_init(&bio_list);
2313
2314 if (rbio->real_stripes - rbio->nr_data == 1) {
2315 p_stripe = rbio->real_stripes - 1;
2316 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2317 p_stripe = rbio->real_stripes - 2;
2318 q_stripe = rbio->real_stripes - 1;
2319 } else {
2320 BUG();
2321 }
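	/*
	 * real_stripes - nr_data is the number of parity stripes: one (P)
	 * for raid5, two (P and Q) for raid6; anything else means the rbio
	 * is malformed, hence the BUG() above.
	 */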
2322
2323 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2324 is_replace = 1;
2325 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2326 }
2327
2328 /*
 2329 * The higher layers (the scrubber) are unlikely to use this
 2330 * area of the disk again soon, so don't cache it.
2332 */
2333 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2334
2335 if (!need_check)
2336 goto writeback;
2337
2338 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2339 if (!p_page)
2340 goto cleanup;
2341 SetPageUptodate(p_page);
2342
2343 if (q_stripe != -1) {
2344 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2345 if (!q_page) {
2346 __free_page(p_page);
2347 goto cleanup;
2348 }
2349 SetPageUptodate(q_page);
2350 }
2351
2352 atomic_set(&rbio->error, 0);
2353
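	/*
	 * For every horizontal stripe that carries data, regenerate the
	 * parity into the scratch page(s) above, compare it with what the
	 * scrubbed stripe currently holds, and keep the page marked for
	 * writeback only if the two differ.
	 */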
2354 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2355 struct page *p;
2356 void *parity;
2357 /* first collect one page from each data stripe */
2358 for (stripe = 0; stripe < nr_data; stripe++) {
2359 p = page_in_rbio(rbio, stripe, pagenr, 0);
2360 pointers[stripe] = kmap(p);
2361 }
2362
2363 /* then add the parity stripe */
2364 pointers[stripe++] = kmap(p_page);
2365
2366 if (q_stripe != -1) {
2367
2368 /*
2369 * raid6, add the qstripe and call the
2370 * library function to fill in our p/q
2371 */
2372 pointers[stripe++] = kmap(q_page);
2373
2c8cdd6e 2374 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2375 pointers);
2376 } else {
2377 /* raid5 */
2378 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
09cbfeaf 2379 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2380 }
2381
01327610 2382 /* Check the parity page being scrubbed and repair it if it differs */
2383 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2384 parity = kmap(p);
2385 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2386 memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
 2387 else
 2388 /* the parity on disk is already correct, no need to write it back */
2389 bitmap_clear(rbio->dbitmap, pagenr, 1);
2390 kunmap(p);
2391
2c8cdd6e 2392 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2393 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2394 }
2395
2396 __free_page(p_page);
2397 if (q_page)
2398 __free_page(q_page);
2399
2400writeback:
2401 /*
2402 * time to start writing. Make bios for everything from the
2403 * higher layers (the bio_list in our rbio) and our p/q. Ignore
2404 * everything else.
2405 */
2406 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2407 struct page *page;
2408
2409 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2410 ret = rbio_add_io_page(rbio, &bio_list,
2411 page, rbio->scrubp, pagenr, rbio->stripe_len);
2412 if (ret)
2413 goto cleanup;
2414 }
2415
2416 if (!is_replace)
2417 goto submit_write;
2418
2419 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2420 struct page *page;
2421
2422 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2423 ret = rbio_add_io_page(rbio, &bio_list, page,
2424 bbio->tgtdev_map[rbio->scrubp],
2425 pagenr, rbio->stripe_len);
2426 if (ret)
2427 goto cleanup;
2428 }
2429
2430submit_write:
2431 nr_data = bio_list_size(&bio_list);
2432 if (!nr_data) {
 2433 /* every parity page was already correct, nothing to write back */
58efbc9f 2434 rbio_orig_end_io(rbio, BLK_STS_OK);
2435 return;
2436 }
2437
2438 atomic_set(&rbio->stripes_pending, nr_data);
2439
2440 while (1) {
2441 bio = bio_list_pop(&bio_list);
2442 if (!bio)
2443 break;
2444
2445 bio->bi_private = rbio;
a6111d11 2446 bio->bi_end_io = raid_write_end_io;
37226b21 2447 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2448
2449 submit_bio(bio);
2450 }
2451 return;
2452
2453cleanup:
58efbc9f 2454 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2455}
2456
2457static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2458{
2459 if (stripe >= 0 && stripe < rbio->nr_data)
2460 return 1;
2461 return 0;
2462}
2463
2464/*
2465 * While we're doing the parity check and repair, we could have errors
2466 * in reading pages off the disk. This checks for errors and if we're
2467 * not able to read the page it'll trigger parity reconstruction. The
2468 * parity scrub will be finished after we've reconstructed the failed
2469 * stripes
2470 */
2471static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2472{
2473 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2474 goto cleanup;
2475
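	/*
	 * Count how many failures hit data stripes (dfail) and remember a
	 * failed parity stripe in failp; the repair decisions below are
	 * based on those two values.
	 */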
2476 if (rbio->faila >= 0 || rbio->failb >= 0) {
2477 int dfail = 0, failp = -1;
2478
2479 if (is_data_stripe(rbio, rbio->faila))
2480 dfail++;
2481 else if (is_parity_stripe(rbio->faila))
2482 failp = rbio->faila;
2483
2484 if (is_data_stripe(rbio, rbio->failb))
2485 dfail++;
2486 else if (is_parity_stripe(rbio->failb))
2487 failp = rbio->failb;
2488
2489 /*
 2490 * Because the parity that is being scrubbed cannot be used to
 2491 * repair data, our repair capability is reduced by one.
 2492 * (In the case of RAID5 we cannot repair anything.)
2493 */
2494 if (dfail > rbio->bbio->max_errors - 1)
2495 goto cleanup;
2496
2497 /*
 2498 * If all the data is good and only the parity is bad, just
 2499 * rewrite the parity.
2500 */
2501 if (dfail == 0) {
2502 finish_parity_scrub(rbio, 0);
2503 return;
2504 }
2505
2506 /*
 2507 * At this point we have one corrupted data stripe and one corrupted
 2508 * parity on RAID6. If the corrupted parity is the one being scrubbed,
 2509 * we can luckily use the other parity to repair the data; otherwise
 2510 * the data stripe cannot be repaired.
2511 */
2512 if (failp != rbio->scrubp)
2513 goto cleanup;
2514
2515 __raid_recover_end_io(rbio);
2516 } else {
2517 finish_parity_scrub(rbio, 1);
2518 }
2519 return;
2520
2521cleanup:
58efbc9f 2522 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2523}
2524
2525/*
 2526 * end io for the read phase of the scrub cycle. All the bios here are physical
2527 * stripe bios we've read from the disk so we can recalculate the parity of the
2528 * stripe.
2529 *
 2530 * This will usually kick off finish_parity_scrub once all the bios are read in, but it
2531 * may trigger parity reconstruction if we had any errors along the way
2532 */
4246a0b6 2533static void raid56_parity_scrub_end_io(struct bio *bio)
2534{
2535 struct btrfs_raid_bio *rbio = bio->bi_private;
2536
4e4cbee9 2537 if (bio->bi_status)
2538 fail_bio_stripe(rbio, bio);
2539 else
2540 set_bio_pages_uptodate(bio);
2541
2542 bio_put(bio);
2543
2544 if (!atomic_dec_and_test(&rbio->stripes_pending))
2545 return;
2546
2547 /*
2548 * this will normally call finish_rmw to start our write
2549 * but if there are any failed stripes we'll reconstruct
2550 * from parity first
2551 */
2552 validate_rbio_for_parity_scrub(rbio);
2553}
2554
2555static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2556{
2557 int bios_to_read = 0;
2558 struct bio_list bio_list;
2559 int ret;
2560 int pagenr;
2561 int stripe;
2562 struct bio *bio;
2563
2564 ret = alloc_rbio_essential_pages(rbio);
2565 if (ret)
2566 goto cleanup;
2567
2568 bio_list_init(&bio_list);
2569
2570 atomic_set(&rbio->error, 0);
2571 /*
2572 * build a list of bios to read all the missing parts of this
2573 * stripe
2574 */
2c8cdd6e 2575 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2576 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2577 struct page *page;
2578 /*
2579 * we want to find all the pages missing from
2580 * the rbio and read them from the disk. If
2581 * page_in_rbio finds a page in the bio list
2582 * we don't need to read it off the stripe.
2583 */
2584 page = page_in_rbio(rbio, stripe, pagenr, 1);
2585 if (page)
2586 continue;
2587
2588 page = rbio_stripe_page(rbio, stripe, pagenr);
2589 /*
2590 * the bio cache may have handed us an uptodate
2591 * page. If so, be happy and use it
2592 */
2593 if (PageUptodate(page))
2594 continue;
2595
2596 ret = rbio_add_io_page(rbio, &bio_list, page,
2597 stripe, pagenr, rbio->stripe_len);
2598 if (ret)
2599 goto cleanup;
2600 }
2601 }
2602
2603 bios_to_read = bio_list_size(&bio_list);
2604 if (!bios_to_read) {
2605 /*
2606 * this can happen if others have merged with
2607 * us, it means there is nothing left to read.
2608 * But if there are missing devices it may not be
2609 * safe to do the full stripe write yet.
2610 */
2611 goto finish;
2612 }
2613
2614 /*
2615 * the bbio may be freed once we submit the last bio. Make sure
2616 * not to touch it after that
2617 */
2618 atomic_set(&rbio->stripes_pending, bios_to_read);
2619 while (1) {
2620 bio = bio_list_pop(&bio_list);
2621 if (!bio)
2622 break;
2623
2624 bio->bi_private = rbio;
2625 bio->bi_end_io = raid56_parity_scrub_end_io;
37226b21 2626 bio_set_op_attrs(bio, REQ_OP_READ, 0);
5a6ac9ea 2627
0b246afa 2628 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
5a6ac9ea 2629
4e49ea4a 2630 submit_bio(bio);
2631 }
2632 /* the actual write will happen once the reads are done */
2633 return;
2634
2635cleanup:
58efbc9f 2636 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2637 return;
2638
2639finish:
2640 validate_rbio_for_parity_scrub(rbio);
2641}
2642
2643static void scrub_parity_work(struct btrfs_work *work)
2644{
2645 struct btrfs_raid_bio *rbio;
2646
2647 rbio = container_of(work, struct btrfs_raid_bio, work);
2648 raid56_parity_scrub_stripe(rbio);
2649}
2650
2651static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2652{
2653 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2654 scrub_parity_work, NULL, NULL);
2655
0b246afa 2656 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2657}
2658
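/*
 * lock_stripe_add() returns 0 when we took ownership of the stripe lock, in
 * which case the scrub work is queued immediately; otherwise the rbio has
 * been added behind the current lock holder and will run when it finishes.
 * raid56_submit_missing_rbio() below follows the same pattern.
 */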
2659void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2660{
2661 if (!lock_stripe_add(rbio))
2662 async_scrub_parity(rbio);
2663}
2664
2665/* The following code is used for dev replace of a missing RAID 5/6 device. */
2666
2667struct btrfs_raid_bio *
2ff7e61e 2668raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2669 struct btrfs_bio *bbio, u64 length)
2670{
2671 struct btrfs_raid_bio *rbio;
2672
2ff7e61e 2673 rbio = alloc_rbio(fs_info, bbio, length);
2674 if (IS_ERR(rbio))
2675 return NULL;
2676
2677 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2678 bio_list_add(&rbio->bio_list, bio);
2679 /*
2680 * This is a special bio which is used to hold the completion handler
 2681 * and keep this rbio similar to the other rbio types
2682 */
2683 ASSERT(!bio->bi_iter.bi_size);
2684
2685 rbio->faila = find_logical_bio_stripe(rbio, bio);
2686 if (rbio->faila == -1) {
2687 BUG();
2688 kfree(rbio);
2689 return NULL;
2690 }
2691
2692 /*
 2693 * When we got the bbio we already increased bio_counter; record that
 2694 * here so the counter can be dropped at rbio_orig_end_io().
2695 */
2696 rbio->generic_bio_cnt = 1;
2697
2698 return rbio;
2699}
2700
2701static void missing_raid56_work(struct btrfs_work *work)
2702{
2703 struct btrfs_raid_bio *rbio;
2704
2705 rbio = container_of(work, struct btrfs_raid_bio, work);
2706 __raid56_parity_recover(rbio);
2707}
2708
2709static void async_missing_raid56(struct btrfs_raid_bio *rbio)
2710{
2711 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2712 missing_raid56_work, NULL, NULL);
2713
2714 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2715}
2716
2717void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2718{
2719 if (!lock_stripe_add(rbio))
2720 async_missing_raid56(rbio);
2721}