/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/vmalloc.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE		1024

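/*
 * Each rbio carries exactly one of the operations below; unlock_stripe()
 * and __raid_recover_end_io() later in this file dispatch on it to pick
 * the completion path once the stripe's IO finishes.
 */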
enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged. The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

	/* total stripes including parity, excluding any dev-replace targets */
	int real_stripes;

	/* number of pages in each stripe */
	int stripe_npages;

	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

	/* stripe index that parity scrub is checking/repairing */
	int scrubp;

	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list. This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

	/* number of fs_info->bio_counter references held for this rbio */
	int generic_bio_cnt;

	atomic_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * these are two arrays of pointers. We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list. Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash_table *x;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kzalloc(table_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!table) {
		table = vzalloc(table_size);
		if (!table)
			return -ENOMEM;
	}

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	x = cmpxchg(&info->stripe_hash_table, NULL, table);
	if (x)
		kvfree(x);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array. We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit. We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination. The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache. The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO. We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				atomic_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache. It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		atomic_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api. It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

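/*
 * Worked example (this matches the raid5 path in finish_rmw() below): for
 * a 3-device stripe with data pages D0 and D1 and parity page P, P is
 * first seeded with a copy of D0, then run_xor() is called with
 * pages = {D1, P} and src_cnt = 1, leaving P = D0 ^ D1.
 */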
/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}

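/*
 * For example, with two data stripes of 64K each, the rbio is only "full"
 * when the queued bios cover exactly 2 * 64K = 128K; anything less goes
 * through the read/modify/write path instead.
 */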
/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us. We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * Parity scrub needs to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}

static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

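/*
 * The stripe_pages array is a flat, row-major grid: for instance, with
 * stripe_npages = 16, page 3 of stripe 2 lives at index 2 * 16 + 3 = 35.
 */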
/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock. rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet. The rbio is given
 * the lock and 0 is returned. The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner. The rbio is freed and the IO will
 * start automatically along with the existing rbio. 1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list. When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission. If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;
	int walk = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		walk++;
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				atomic_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones. We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	atomic_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed. If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		atomic_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			atomic_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
			/*
			 * The barrier for this waitqueue_active is not needed,
			 * we're protected by h->lock and can't miss a wakeup.
			 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	WARN_ON(atomic_read(&rbio->refs) < 0);
	if (!atomic_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

static void free_raid_bio(struct btrfs_raid_bio *rbio)
{
	unlock_stripe(rbio);
	__free_raid_bio(rbio);
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *next;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	free_raid_bio(rbio);

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_error = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * end io function used by finish_rmw. When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	int err = bio->bi_error;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = 0;

	/* OK, we have read all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = -EIO;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else. This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

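/*
 * e.g. a 64K stripe_len with 4K pages across 3 devices (2 data + 1 parity)
 * needs 16 * 3 = 48 pages.
 */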
/*
 * allocation and initial setup for the btrfs_raid_bio. Note that
 * this does not allocate any pages for rbio->pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
					 struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = root->fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	atomic_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

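/*
 * Layout of the single allocation made above (everything after the struct
 * comes from the same kzalloc):
 *
 *   [struct btrfs_raid_bio][stripe_pages ptrs][bio_pages ptrs][dbitmap longs]
 *                           num_pages          num_pages       stripe_npages bits
 */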
/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_error &&
		    last->bi_bdev == stripe->dev->bdev) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(GFP_NOFS, bio_max_len >> PAGE_SHIFT ?: 1);
	if (!bio)
		return -ENOMEM;

	bio->bi_iter.bi_size = 0;
	bio->bi_bdev = stripe->dev->bdev;
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk. This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction. The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result. This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	struct bio_vec *bvec;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;
	int i;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

		bio_for_each_segment_all(bvec, bio, i)
			rbio->bio_pages[page_index + i] = bvec->bv_page;
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations. We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon. If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}


		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}

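/*
 * For reference, the parity computed above: raid5 keeps
 * P = D0 ^ D1 ^ ... ^ Dn-1 (the memcpy + run_xor pair), while raid6 keeps
 * both P and the Galois-field syndrome
 * Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*Dn-1 (raid6_call.gen_syndrome()).
 */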
/*
 * helper to find the stripe number for a given bio. Used to figure out which
 * stripe has failed. This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    bio->bi_bdev == stripe->dev->bdev) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping). Used to figure out which stripe has
 * failed. This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate. It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}

/*
 * end io for the read phase of the rmw cycle. All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, -EIO);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			rmw_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

/*
 * the stripe must be locked by the caller. It will
 * unlock after all the writes are done
 */
static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	index_rbio_pages(rbio);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->nr_data; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us, it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_rmw_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return 0;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return -EIO;

finish:
	validate_rbio_for_rmw(rbio);
	return 0;
}

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

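/*
 * Sorting by starting sector means rbios that belong to the same full
 * stripe end up adjacent, so the single pass in run_plug() below can merge
 * them without rescanning the list.
 */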
static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(root->fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
			       sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = root->fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(root->fs_info);
	}
	return ret;
}

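/*
 * Note: in this kernel the caller of raid56_parity_write() is
 * btrfs_map_bio() in volumes.c, which routes whole RAID5/6 stripe writes
 * here instead of submitting per-device bios itself.
 */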
1791/*
1792 * all parity reconstruction happens here. We've read in everything
1793 * we can find from the drives and this does the heavy lifting of
1794 * sorting the good from the bad.
1795 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	int err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = -ENOMEM;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * When doing parity scrub, we use a bitmap to mark the
		 * horizontal stripes in which we actually have data.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/*
		 * set up our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed,
					 * without a bad data or Q stripe.
					 * TODO: we should redo the xor here.
					 */
					err = -EIO;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/*
			 * If the Q stripe failed, do a P stripe
			 * reconstruction from the xors.  If both the Q
			 * stripe and the P stripe failed, we're here
			 * because of a crc mismatch and we can't give
			 * the callers the data they want.
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = -EIO;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe.  raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/*
		 * If we're doing this rebuild as part of an rmw, go
		 * through and set all of our private rbio pages in the
		 * failed stripes as uptodate.  This way finish_rmw will
		 * know they can be trusted.  If this was a read
		 * reconstruction, other endio functions will fiddle the
		 * uptodate bits.
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = 0;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == 0)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == 0) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
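
/*
 * Added sketch, not part of the original file: a user-space style
 * rendering of the pstripe path above, under the assumption that
 * parity is the plain XOR of nr_data data blocks.  All names here are
 * hypothetical and the block is fenced off with #if 0 so it is never
 * built.
 */
#if 0
static void example_pstripe_rebuild(void **pointers, int nr_data,
				    int faila, size_t len)
{
	unsigned char *dest;
	size_t off;
	int i;

	/* start from parity: dest = P, which lives at pointers[nr_data] */
	memcpy(pointers[faila], pointers[nr_data], len);
	dest = pointers[faila];

	/* XOR in every surviving data block: dest ^= D_i */
	for (i = 0; i < nr_data; i++) {
		const unsigned char *src = pointers[i];

		if (i == faila)
			continue;
		for (off = 0; off < len; off++)
			dest[off] ^= src[off];
	}
	/* dest now holds the reconstructed data block */
}
#endif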

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, -EIO);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct the parity.
 * endio handlers trigger final reconstruction when the IO is done.
 *
 * This is used both for reads from the higher layers and for parity
 * construction required to finish an rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed.  Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read
		 * because the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, -EIO);
	return -EIO;
}

/*
 * the main entry point for reads from the higher layers.  This is
 * really only called when the normal read path had a failure, so we
 * assume the bio they send down corresponds to a failed part of the
 * drive.
 */
int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(root->fs_info,
			   "%s could not find the bad stripe in raid56, so it cannot be recovered (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(root->fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * mirror 3 means "reconstruct from the Q stripe", so mark the
	 * P stripe (index real_stripes - 2) as failed to force the
	 * recovery code to use Q
	 */
	if (mirror_num == 3)
		rbio->failb = rbio->real_stripes - 2;

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with any errors it
	 * hits.  We don't want to return its error value up the stack
	 * because our caller will end up calling bio_endio with any
	 * nonzero return.
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * otherwise our rbio has been added to the list of rbios that
	 * will be handled after the current lock owner is done
	 */
	return 0;
}
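
/*
 * Added note, not part of the original file: for a RAID6 rbio the
 * stripe indexes are data 0 .. nr_data - 1, P at real_stripes - 2 and
 * Q at real_stripes - 1.  A mirror_num above 1 asks for
 * reconstruction rather than a direct read, and mirror 3 additionally
 * forces the recovery code to use Q by declaring P failed, as done
 * above.  A hypothetical helper spelling that out:
 */
#if 0
static void example_mark_failed_for_mirror(struct btrfs_raid_bio *rbio,
					   int failed_data, int mirror_num)
{
	rbio->faila = failed_data;	/* the data stripe we lost */
	if (mirror_num == 3)		/* rebuild from Q */
		rbio->failb = rbio->real_stripes - 2;	/* treat P as failed */
}
#endif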

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe.
 *
 * Note: we need to make sure that all the pages added to the
 * scrub/replace raid bio are correct and will not change while the
 * scrub/replace runs, i.e. they only hold metadata or file data
 * protected by a checksum.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(root, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and make the scrub rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	for (i = 0; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}

	/* for now we only support a sectorsize equal to PAGE_SIZE */
	ASSERT(root->fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
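
/*
 * Added worked example, not part of the original file, with assumed
 * numbers: for 4K pages (PAGE_SHIFT == 12), a page at logical offset
 * 68K from raid_map[0] lands at index 68K >> 12 == 17 in the flat
 * bio_pages array used above.
 */
#if 0
static int example_scrub_page_index(u64 logical, u64 raid_map0)
{
	/* e.g. logical == raid_map0 + 68 * 1024 gives 17 */
	return (int)((logical - raid_map0) >> PAGE_SHIFT);
}
#endif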

/*
 * We only scrub the parity on horizontal stripes where we have
 * correct data, so we don't need to allocate pages for every stripe.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
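
/*
 * Added note, not part of the original file: stripe_pages is a flat,
 * stripe-major array, so page 'bit' of stripe 'i' lives at
 * i * stripe_npages + bit.  With stripe_npages == 16, for example,
 * page 3 of stripe 2 sits in slot 2 * 16 + 3 == 35.
 */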

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (the scrubber) are unlikely to use this
	 * area of the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* check the scrubbed parity and repair it if it is wrong */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* parity is right, no need to write it back */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* every parity is right */
		rbio_orig_end_io(rbio, 0);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
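
/*
 * Added sketch, not part of the original file: the RAID5 half of the
 * check above, condensed.  Parity is recomputed from the data pages
 * into a scratch buffer and a writeback is queued only when the
 * on-disk parity disagrees; the RAID6 case uses
 * raid6_call.gen_syndrome() instead.  Names are hypothetical and the
 * block is fenced off with #if 0.
 */
#if 0
static int example_parity_needs_rewrite(void **data, int nr_data,
					const void *ondisk_parity,
					unsigned char *scratch, size_t len)
{
	size_t off;
	int i;

	/* RAID5: scratch = D0 ^ D1 ^ ... ^ D(nr_data - 1) */
	memcpy(scratch, data[0], len);
	for (i = 1; i < nr_data; i++) {
		const unsigned char *src = data[i];

		for (off = 0; off < len; off++)
			scratch[off] ^= src[off];
	}

	/* rewrite only when the stored parity is wrong */
	return memcmp(ondisk_parity, scratch, len) != 0;
}
#endif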

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk.  This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction.  The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes.
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity stripe that is being
		 * scrubbed to repair data, our repair capability is
		 * reduced by one (in the RAID5 case we cannot repair
		 * anything).
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data stripes are good, only the parity is
		 * wrong; just repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe
		 * and one corrupted parity stripe on RAID6.  If the
		 * corrupted parity is the one being scrubbed, we can
		 * use the other, good parity to rebuild the data;
		 * otherwise the data stripe cannot be repaired.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
}
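
/*
 * Added summary, not part of the original file, of the decisions
 * above, where dfail counts failed data stripes and failp records a
 * failed parity stripe:
 *
 *   condition                          action
 *   ---------------------------------  -----------------------------
 *   dfail > max_errors - 1             give up (-EIO)
 *   dfail == 0                         finish_parity_scrub(rbio, 0)
 *   dfail == 1, failp == rbio->scrubp  __raid_recover_end_io(rbio)
 *   dfail == 1, failp != rbio->scrubp  give up (-EIO)
 */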

/*
 * end io for the read phase of the scrub cycle.  All the bios here
 * are physical stripe bios we've read from the disk so we can
 * recalculate the parity of the stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios
 * are read in, but it may trigger parity reconstruction if we had
 * any errors along the way.
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_error)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our
	 * write, but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	bio_list_init(&bio_list);

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk.  If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page.  If so, be happy and use it.
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with us, it
		 * means there is nothing left to read.  But if there
		 * are missing devices it may not be safe to do the
		 * full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio.  Make sure
	 * not to touch it after that.
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio,
				    BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, -EIO);
	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers,
			 &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_root *root, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(root, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion
	 * handler and make the missing rbio similar to the other types.
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}