/*
 * Copyright (C) 2012 Fusion-io  All rights reserved.
 * Copyright (C) 2012 Intel Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/iocontext.h>
#include <linux/capability.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/hash.h>
#include <linux/list_sort.h>
#include <linux/raid/xor.h>
#include <linux/mm.h>
#include <asm/div64.h>
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"

/* set when additional merges to this rbio are not allowed */
#define RBIO_RMW_LOCKED_BIT	1

/*
 * set when this rbio is sitting in the hash, but it is just a cache
 * of past RMW
 */
#define RBIO_CACHE_BIT		2

/*
 * set when it is safe to trust the stripe_pages for caching
 */
#define RBIO_CACHE_READY_BIT	3

#define RBIO_CACHE_SIZE		1024

enum btrfs_rbio_ops {
	BTRFS_RBIO_WRITE,
	BTRFS_RBIO_READ_REBUILD,
	BTRFS_RBIO_PARITY_SCRUB,
	BTRFS_RBIO_REBUILD_MISSING,
};

struct btrfs_raid_bio {
	struct btrfs_fs_info *fs_info;
	struct btrfs_bio *bbio;

	/* while we're doing rmw on a stripe
	 * we put it into a hash table so we can
	 * lock the stripe and merge more rbios
	 * into it.
	 */
	struct list_head hash_list;

	/*
	 * LRU list for the stripe cache
	 */
	struct list_head stripe_cache;

	/*
	 * for scheduling work in the helper threads
	 */
	struct btrfs_work work;

	/*
	 * bio list and bio_list_lock are used
	 * to add more bios into the stripe
	 * in hopes of avoiding the full rmw
	 */
	struct bio_list bio_list;
	spinlock_t bio_list_lock;

	/* also protected by the bio_list_lock, the
	 * plug list is used by the plugging code
	 * to collect partial bios while plugged.  The
	 * stripe locking code also uses it to hand off
	 * the stripe lock to the next pending IO
	 */
	struct list_head plug_list;

	/*
	 * flags that tell us if it is safe to
	 * merge with this bio
	 */
	unsigned long flags;

	/* size of each individual stripe on disk */
	int stripe_len;

	/* number of data stripes (no p/q) */
	int nr_data;

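	/*
	 * number of stripes in the full stripe, including P/Q but not
	 * counting any dev-replace target devices
	 */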
	int real_stripes;

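	/* number of pages in a single stripe: stripe_len / PAGE_SIZE */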
	int stripe_npages;
	/*
	 * set if we're doing a parity rebuild
	 * for a read from higher up, which is handled
	 * differently from a parity rebuild as part of
	 * rmw
	 */
	enum btrfs_rbio_ops operation;

	/* first bad stripe */
	int faila;

	/* second bad stripe (for raid6 use) */
	int failb;

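	/* index of the stripe (P or Q) that is being scrubbed */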
	int scrubp;
	/*
	 * number of pages needed to represent the full
	 * stripe
	 */
	int nr_pages;

	/*
	 * size of all the bios in the bio_list.  This
	 * helps us decide if the rbio maps to a full
	 * stripe or not
	 */
	int bio_list_bytes;

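	/*
	 * bio counter refs (btrfs_bio_counter_inc) held by this rbio,
	 * dropped in rbio_orig_end_io
	 */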
	int generic_bio_cnt;

	refcount_t refs;

	atomic_t stripes_pending;

	atomic_t error;
	/*
	 * these are two arrays of pointers.  We allocate the
	 * rbio big enough to hold them both and setup their
	 * locations when the rbio is allocated
	 */

	/* pointers to pages that we allocated for
	 * reading/writing stripes directly from the disk (including P/Q)
	 */
	struct page **stripe_pages;

	/*
	 * pointers to the pages in the bio_list.  Stored
	 * here for faster lookup
	 */
	struct page **bio_pages;

	/*
	 * bitmap to record which horizontal stripe has data
	 */
	unsigned long *dbitmap;
};

static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
static void rmw_work(struct btrfs_work *work);
static void read_rebuild_work(struct btrfs_work *work);
static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
static void async_read_rebuild(struct btrfs_raid_bio *rbio);
static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
static void __free_raid_bio(struct btrfs_raid_bio *rbio);
static void index_rbio_pages(struct btrfs_raid_bio *rbio);
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check);
static void async_scrub_parity(struct btrfs_raid_bio *rbio);

/*
 * the stripe hash table is used for locking, and to collect
 * bios in hopes of making a full stripe
 */
int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *cur;
	struct btrfs_stripe_hash *h;
	int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
	int i;
	int table_size;

	if (info->stripe_hash_table)
		return 0;

	/*
	 * The table is large, starting with order 4 and can go as high as
	 * order 7 in case lock debugging is turned on.
	 *
	 * Try harder to allocate and fallback to vmalloc to lower the chance
	 * of a failing mount.
	 */
	table_size = sizeof(*table) + sizeof(*h) * num_entries;
	table = kvzalloc(table_size, GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	spin_lock_init(&table->cache_lock);
	INIT_LIST_HEAD(&table->stripe_cache);

	h = table->table;

	for (i = 0; i < num_entries; i++) {
		cur = h + i;
		INIT_LIST_HEAD(&cur->hash_list);
		spin_lock_init(&cur->lock);
		init_waitqueue_head(&cur->wait);
	}

	/*
	 * if another thread raced in and installed a table first, keep
	 * the existing table and free ours
	 */
	if (cmpxchg(&info->stripe_hash_table, NULL, table))
		kvfree(table);
	return 0;
}

/*
 * caching an rbio means to copy anything from the
 * bio_pages array into the stripe_pages array.  We
 * use the page uptodate bit in the stripe cache array
 * to indicate if it has valid data
 *
 * once the caching is done, we set the cache ready
 * bit.
 */
static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	char *s;
	char *d;
	int ret;

	ret = alloc_rbio_pages(rbio);
	if (ret)
		return;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (!rbio->bio_pages[i])
			continue;

		s = kmap(rbio->bio_pages[i]);
		d = kmap(rbio->stripe_pages[i]);

		memcpy(d, s, PAGE_SIZE);

		kunmap(rbio->bio_pages[i]);
		kunmap(rbio->stripe_pages[i]);
		SetPageUptodate(rbio->stripe_pages[i]);
	}
	set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
}

/*
 * we hash on the first logical address of the stripe
 */
static int rbio_bucket(struct btrfs_raid_bio *rbio)
{
	u64 num = rbio->bbio->raid_map[0];

	/*
	 * we shift down quite a bit.  We're using byte
	 * addressing, and most of the lower bits are zeros.
	 * This tends to upset hash_64, and it consistently
	 * returns just one or two different values.
	 *
	 * shifting off the lower bits fixes things.
	 */
	return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
}

/*
 * stealing an rbio means taking all the uptodate pages from the stripe
 * array in the source rbio and putting them into the destination rbio
 */
static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
{
	int i;
	struct page *s;
	struct page *d;

	if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
		return;

	for (i = 0; i < dest->nr_pages; i++) {
		s = src->stripe_pages[i];
		if (!s || !PageUptodate(s))
			continue;

		d = dest->stripe_pages[i];
		if (d)
			__free_page(d);

		dest->stripe_pages[i] = s;
		src->stripe_pages[i] = NULL;
	}
}

/*
 * merging means we take the bio_list from the victim and
 * splice it into the destination.  The victim should
 * be discarded afterwards.
 *
 * must be called with dest->bio_list_lock held
 */
static void merge_rbio(struct btrfs_raid_bio *dest,
		       struct btrfs_raid_bio *victim)
{
	bio_list_merge(&dest->bio_list, &victim->bio_list);
	dest->bio_list_bytes += victim->bio_list_bytes;
	dest->generic_bio_cnt += victim->generic_bio_cnt;
	bio_list_init(&victim->bio_list);
}

/*
 * used to prune items that are in the cache.  The caller
 * must hold the hash table lock.
 */
static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash_table *table;
	struct btrfs_stripe_hash *h;
	int freeit = 0;

	/*
	 * check the bit again under the hash table lock.
	 */
	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;
	h = table->table + bucket;

	/* hold the lock for the bucket because we may be
	 * removing it from the hash table
	 */
	spin_lock(&h->lock);

	/*
	 * hold the lock for the bio list because we need
	 * to make sure the bio list is empty
	 */
	spin_lock(&rbio->bio_list_lock);

	if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
		list_del_init(&rbio->stripe_cache);
		table->cache_size -= 1;
		freeit = 1;

		/* if the bio list isn't empty, this rbio is
		 * still involved in an IO.  We take it out
		 * of the cache list, and drop the ref that
		 * was held for the list.
		 *
		 * If the bio_list was empty, we also remove
		 * the rbio from the hash_table, and drop
		 * the corresponding ref
		 */
		if (bio_list_empty(&rbio->bio_list)) {
			if (!list_empty(&rbio->hash_list)) {
				list_del_init(&rbio->hash_list);
				refcount_dec(&rbio->refs);
				BUG_ON(!list_empty(&rbio->plug_list));
			}
		}
	}

	spin_unlock(&rbio->bio_list_lock);
	spin_unlock(&h->lock);

	if (freeit)
		__free_raid_bio(rbio);
}

/*
 * prune a given rbio from the cache
 */
static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	__remove_rbio_from_cache(rbio);
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove everything in the cache
 */
static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;
	struct btrfs_raid_bio *rbio;

	table = info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	while (!list_empty(&table->stripe_cache)) {
		rbio = list_entry(table->stripe_cache.next,
				  struct btrfs_raid_bio,
				  stripe_cache);
		__remove_rbio_from_cache(rbio);
	}
	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * remove all cached entries and free the hash table
 * used by unmount
 */
void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
{
	if (!info->stripe_hash_table)
		return;
	btrfs_clear_rbio_cache(info);
	kvfree(info->stripe_hash_table);
	info->stripe_hash_table = NULL;
}

/*
 * insert an rbio into the stripe cache.  It
 * must have already been prepared by calling
 * cache_rbio_pages
 *
 * If this rbio was already cached, it gets
 * moved to the front of the lru.
 *
 * If the size of the rbio cache is too big, we
 * prune an item.
 */
static void cache_rbio(struct btrfs_raid_bio *rbio)
{
	struct btrfs_stripe_hash_table *table;
	unsigned long flags;

	if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
		return;

	table = rbio->fs_info->stripe_hash_table;

	spin_lock_irqsave(&table->cache_lock, flags);
	spin_lock(&rbio->bio_list_lock);

	/* bump our ref if we were not in the list before */
	if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
		refcount_inc(&rbio->refs);

	if (!list_empty(&rbio->stripe_cache)) {
		list_move(&rbio->stripe_cache, &table->stripe_cache);
	} else {
		list_add(&rbio->stripe_cache, &table->stripe_cache);
		table->cache_size += 1;
	}

	spin_unlock(&rbio->bio_list_lock);

	if (table->cache_size > RBIO_CACHE_SIZE) {
		struct btrfs_raid_bio *found;

		found = list_entry(table->stripe_cache.prev,
				   struct btrfs_raid_bio,
				   stripe_cache);

		if (found != rbio)
			__remove_rbio_from_cache(found);
	}

	spin_unlock_irqrestore(&table->cache_lock, flags);
}

/*
 * helper function to run the xor_blocks api.  It is only
 * able to do MAX_XOR_BLOCKS at a time, so we need to
 * loop through.
 */
static void run_xor(void **pages, int src_cnt, ssize_t len)
{
	int src_off = 0;
	int xor_src_cnt = 0;
	void *dest = pages[src_cnt];

	while (src_cnt > 0) {
		xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
		xor_blocks(xor_src_cnt, len, dest, pages + src_off);

		src_cnt -= xor_src_cnt;
		src_off += xor_src_cnt;
	}
}

/*
 * returns true if the bio list inside this rbio
 * covers an entire stripe (no rmw required).
 * Must be called with the bio list lock held, or
 * at a time when you know it is impossible to add
 * new bios into the list
 */
static int __rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long size = rbio->bio_list_bytes;
	int ret = 1;

	if (size != rbio->nr_data * rbio->stripe_len)
		ret = 0;

	BUG_ON(size > rbio->nr_data * rbio->stripe_len);
	return ret;
}

static int rbio_is_full(struct btrfs_raid_bio *rbio)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);
	ret = __rbio_is_full(rbio);
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
	return ret;
}

/*
 * returns 1 if it is safe to merge two rbios together.
 * The merging is safe if the two rbios correspond to
 * the same stripe and if they are both going in the same
 * direction (read vs write), and if neither one is
 * locked for final IO
 *
 * The caller is responsible for locking such that
 * rmw_locked is safe to test
 */
static int rbio_can_merge(struct btrfs_raid_bio *last,
			  struct btrfs_raid_bio *cur)
{
	if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
	    test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
		return 0;

	/*
	 * we can't merge with cached rbios, since the
	 * idea is that when we merge the destination
	 * rbio is going to run our IO for us.  We can
	 * steal from cached rbios though, other functions
	 * handle that.
	 */
	if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
	    test_bit(RBIO_CACHE_BIT, &cur->flags))
		return 0;

	if (last->bbio->raid_map[0] !=
	    cur->bbio->raid_map[0])
		return 0;

	/* we can't merge with different operations */
	if (last->operation != cur->operation)
		return 0;
	/*
	 * a parity scrub has to read the full stripe from the drive,
	 * then check and repair the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
	    cur->operation == BTRFS_RBIO_PARITY_SCRUB)
		return 0;

	if (last->operation == BTRFS_RBIO_REBUILD_MISSING ||
	    cur->operation == BTRFS_RBIO_REBUILD_MISSING)
		return 0;

	return 1;
}

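/*
 * flat index into the stripe_pages array: pages are grouped by stripe,
 * stripe_npages pages per stripe
 */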
static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
				  int index)
{
	return stripe * rbio->stripe_npages + index;
}

/*
 * these are just the pages from the rbio array, not from anything
 * the FS sent down to us
 */
static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
				     int index)
{
	return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
}

/*
 * helper to index into the pstripe
 */
static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	return rbio_stripe_page(rbio, rbio->nr_data, index);
}

/*
 * helper to index into the qstripe, returns null
 * if there is no qstripe
 */
static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
{
	if (rbio->nr_data + 1 == rbio->real_stripes)
		return NULL;
	return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
}

/*
 * The first stripe in the table for a logical address
 * has the lock.  rbios are added in one of three ways:
 *
 * 1) Nobody has the stripe locked yet.  The rbio is given
 * the lock and 0 is returned.  The caller must start the IO
 * themselves.
 *
 * 2) Someone has the stripe locked, but we're able to merge
 * with the lock owner.  The rbio is freed and the IO will
 * start automatically along with the existing rbio.  1 is returned.
 *
 * 3) Someone has the stripe locked, but we're not able to merge.
 * The rbio is added to the lock owner's plug list, or merged into
 * an rbio already on the plug list.  When the lock owner unlocks,
 * the next rbio on the list is run and the IO is started automatically.
 * 1 is returned
 *
 * If we return 0, the caller still owns the rbio and must continue with
 * IO submission.  If we return 1, the caller must assume the rbio has
 * already been freed.
 */
static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
{
	int bucket = rbio_bucket(rbio);
	struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *pending;
	unsigned long flags;
	DEFINE_WAIT(wait);
	struct btrfs_raid_bio *freeit = NULL;
	struct btrfs_raid_bio *cache_drop = NULL;
	int ret = 0;

	spin_lock_irqsave(&h->lock, flags);
	list_for_each_entry(cur, &h->hash_list, hash_list) {
		if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
			spin_lock(&cur->bio_list_lock);

			/* can we steal this cached rbio's pages? */
			if (bio_list_empty(&cur->bio_list) &&
			    list_empty(&cur->plug_list) &&
			    test_bit(RBIO_CACHE_BIT, &cur->flags) &&
			    !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
				list_del_init(&cur->hash_list);
				refcount_dec(&cur->refs);

				steal_rbio(cur, rbio);
				cache_drop = cur;
				spin_unlock(&cur->bio_list_lock);

				goto lockit;
			}

			/* can we merge into the lock owner? */
			if (rbio_can_merge(cur, rbio)) {
				merge_rbio(cur, rbio);
				spin_unlock(&cur->bio_list_lock);
				freeit = rbio;
				ret = 1;
				goto out;
			}


			/*
			 * we couldn't merge with the running
			 * rbio, see if we can merge with the
			 * pending ones.  We don't have to
			 * check for rmw_locked because there
			 * is no way they are inside finish_rmw
			 * right now
			 */
			list_for_each_entry(pending, &cur->plug_list,
					    plug_list) {
				if (rbio_can_merge(pending, rbio)) {
					merge_rbio(pending, rbio);
					spin_unlock(&cur->bio_list_lock);
					freeit = rbio;
					ret = 1;
					goto out;
				}
			}

			/* no merging, put us on the tail of the plug list,
			 * our rbio will be started when the currently
			 * running rbio unlocks
			 */
			list_add_tail(&rbio->plug_list, &cur->plug_list);
			spin_unlock(&cur->bio_list_lock);
			ret = 1;
			goto out;
		}
	}
lockit:
	refcount_inc(&rbio->refs);
	list_add(&rbio->hash_list, &h->hash_list);
out:
	spin_unlock_irqrestore(&h->lock, flags);
	if (cache_drop)
		remove_rbio_from_cache(cache_drop);
	if (freeit)
		__free_raid_bio(freeit);
	return ret;
}

/*
 * called as rmw or parity rebuild is completed.  If the plug list has more
 * rbios waiting for this stripe, the next one on the list will be started
 */
static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
{
	int bucket;
	struct btrfs_stripe_hash *h;
	unsigned long flags;
	int keep_cache = 0;

	bucket = rbio_bucket(rbio);
	h = rbio->fs_info->stripe_hash_table->table + bucket;

	if (list_empty(&rbio->plug_list))
		cache_rbio(rbio);

	spin_lock_irqsave(&h->lock, flags);
	spin_lock(&rbio->bio_list_lock);

	if (!list_empty(&rbio->hash_list)) {
		/*
		 * if we're still cached and there is no other IO
		 * to perform, just leave this rbio here for others
		 * to steal from later
		 */
		if (list_empty(&rbio->plug_list) &&
		    test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
			keep_cache = 1;
			clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
			BUG_ON(!bio_list_empty(&rbio->bio_list));
			goto done;
		}

		list_del_init(&rbio->hash_list);
		refcount_dec(&rbio->refs);

		/*
		 * we use the plug list to hold all the rbios
		 * waiting for the chance to lock this stripe.
		 * hand the lock over to one of them.
		 */
		if (!list_empty(&rbio->plug_list)) {
			struct btrfs_raid_bio *next;
			struct list_head *head = rbio->plug_list.next;

			next = list_entry(head, struct btrfs_raid_bio,
					  plug_list);

			list_del_init(&rbio->plug_list);

			list_add(&next->hash_list, &h->hash_list);
			refcount_inc(&next->refs);
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);

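			/*
			 * start the next rbio's work; writes, scrubs and
			 * missing-device rebuilds can steal our cached
			 * uptodate pages first
			 */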
			if (next->operation == BTRFS_RBIO_READ_REBUILD)
				async_read_rebuild(next);
			else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
				steal_rbio(rbio, next);
				async_read_rebuild(next);
			} else if (next->operation == BTRFS_RBIO_WRITE) {
				steal_rbio(rbio, next);
				async_rmw_stripe(next);
			} else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
				steal_rbio(rbio, next);
				async_scrub_parity(next);
			}

			goto done_nolock;
		/*
		 * The barrier for this waitqueue_active is not needed,
		 * we're protected by h->lock and can't miss a wakeup.
		 */
		} else if (waitqueue_active(&h->wait)) {
			spin_unlock(&rbio->bio_list_lock);
			spin_unlock_irqrestore(&h->lock, flags);
			wake_up(&h->wait);
			goto done_nolock;
		}
	}
done:
	spin_unlock(&rbio->bio_list_lock);
	spin_unlock_irqrestore(&h->lock, flags);

done_nolock:
	if (!keep_cache)
		remove_rbio_from_cache(rbio);
}

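/*
 * drop a reference on an rbio, freeing it and its privately allocated
 * pages once the last reference is gone
 */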
static void __free_raid_bio(struct btrfs_raid_bio *rbio)
{
	int i;

	if (!refcount_dec_and_test(&rbio->refs))
		return;

	WARN_ON(!list_empty(&rbio->stripe_cache));
	WARN_ON(!list_empty(&rbio->hash_list));
	WARN_ON(!bio_list_empty(&rbio->bio_list));

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i]) {
			__free_page(rbio->stripe_pages[i]);
			rbio->stripe_pages[i] = NULL;
		}
	}

	btrfs_put_bbio(rbio->bbio);
	kfree(rbio);
}

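/*
 * end a chain of bios linked through bi_next with the given status
 */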
static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
{
	struct bio *next;

	while (cur) {
		next = cur->bi_next;
		cur->bi_next = NULL;
		cur->bi_status = err;
		bio_endio(cur);
		cur = next;
	}
}

/*
 * this frees the rbio and runs through all the bios in the
 * bio_list and calls end_io on them
 */
static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
{
	struct bio *cur = bio_list_get(&rbio->bio_list);
	struct bio *extra;

	if (rbio->generic_bio_cnt)
		btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);

	/*
	 * At this moment, rbio->bio_list is empty, however since rbio does not
	 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
	 * hash list, rbio may be merged with others so that rbio->bio_list
	 * becomes non-empty.
	 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
	 * more and we can call bio_endio() on all queued bios.
	 */
	unlock_stripe(rbio);
	extra = bio_list_get(&rbio->bio_list);
	__free_raid_bio(rbio);

	rbio_endio_bio_list(cur, err);
	if (extra)
		rbio_endio_bio_list(extra, err);
}

/*
 * end io function used by finish_rmw.  When we finally
 * get here, we've written a full stripe
 */
static void raid_write_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;
	blk_status_t err = bio->bi_status;
	int max_errors;

	if (err)
		fail_bio_stripe(rbio, bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	err = BLK_STS_OK;

	/* OK, we have written all the stripes we need to. */
	max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
		     0 : rbio->bbio->max_errors;
	if (atomic_read(&rbio->error) > max_errors)
		err = BLK_STS_IOERR;

	rbio_orig_end_io(rbio, err);
}

/*
 * the read/modify/write code wants to use the original bio for
 * any pages it included, and then use the rbio for everything
 * else.  This function decides if a given index (stripe number)
 * and page number in that stripe fall inside the original bio
 * or the rbio.
 *
 * if you set bio_list_only, you'll get a NULL back for any ranges
 * that are outside the bio_list
 *
 * This doesn't take any refs on anything, you get a bare page pointer
 * and the caller must bump refs as required.
 *
 * You must call index_rbio_pages once before you can trust
 * the answers from this function.
 */
static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
				 int index, int pagenr, int bio_list_only)
{
	int chunk_page;
	struct page *p = NULL;

	chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;

	spin_lock_irq(&rbio->bio_list_lock);
	p = rbio->bio_pages[chunk_page];
	spin_unlock_irq(&rbio->bio_list_lock);

	if (p || bio_list_only)
		return p;

	return rbio->stripe_pages[chunk_page];
}

/*
 * number of pages we need for the entire stripe across all the
 * drives
 */
static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
{
	return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
}

/*
 * allocation and initial setup for the btrfs_raid_bio.  Note that
 * this does not allocate any pages for rbio->stripe_pages.
 */
static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
					 struct btrfs_bio *bbio,
					 u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	int nr_data = 0;
	int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	int num_pages = rbio_nr_pages(stripe_len, real_stripes);
	int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
	void *p;

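	/*
	 * the rbio, the two page pointer arrays, and the dbitmap are all
	 * carved out of a single allocation; the pointers are wired up
	 * below
	 */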
	rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
		       DIV_ROUND_UP(stripe_npages, BITS_PER_LONG) *
		       sizeof(long), GFP_NOFS);
	if (!rbio)
		return ERR_PTR(-ENOMEM);

	bio_list_init(&rbio->bio_list);
	INIT_LIST_HEAD(&rbio->plug_list);
	spin_lock_init(&rbio->bio_list_lock);
	INIT_LIST_HEAD(&rbio->stripe_cache);
	INIT_LIST_HEAD(&rbio->hash_list);
	rbio->bbio = bbio;
	rbio->fs_info = fs_info;
	rbio->stripe_len = stripe_len;
	rbio->nr_pages = num_pages;
	rbio->real_stripes = real_stripes;
	rbio->stripe_npages = stripe_npages;
	rbio->faila = -1;
	rbio->failb = -1;
	refcount_set(&rbio->refs, 1);
	atomic_set(&rbio->error, 0);
	atomic_set(&rbio->stripes_pending, 0);

	/*
	 * the stripe_pages and bio_pages array point to the extra
	 * memory we allocated past the end of the rbio
	 */
	p = rbio + 1;
	rbio->stripe_pages = p;
	rbio->bio_pages = p + sizeof(struct page *) * num_pages;
	rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;

	if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
		nr_data = real_stripes - 1;
	else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
		nr_data = real_stripes - 2;
	else
		BUG();

	rbio->nr_data = nr_data;
	return rbio;
}

/* allocate pages for all the stripes in the bio, including parity */
static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	for (i = 0; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/* only allocate pages for p/q stripes */
static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	struct page *page;

	i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);

	for (; i < rbio->nr_pages; i++) {
		if (rbio->stripe_pages[i])
			continue;
		page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!page)
			return -ENOMEM;
		rbio->stripe_pages[i] = page;
	}
	return 0;
}

/*
 * add a single page from a specific stripe into our list of bios for IO
 * this will try to merge into existing bios if possible, and returns
 * zero if all went well.
 */
static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
			    struct bio_list *bio_list,
			    struct page *page,
			    int stripe_nr,
			    unsigned long page_index,
			    unsigned long bio_max_len)
{
	struct bio *last = bio_list->tail;
	u64 last_end = 0;
	int ret;
	struct bio *bio;
	struct btrfs_bio_stripe *stripe;
	u64 disk_start;

	stripe = &rbio->bbio->stripes[stripe_nr];
	disk_start = stripe->physical + (page_index << PAGE_SHIFT);

	/* if the device is missing, just fail this stripe */
	if (!stripe->dev->bdev)
		return fail_rbio_index(rbio, stripe_nr);

	/* see if we can add this page onto our existing bio */
	if (last) {
		last_end = (u64)last->bi_iter.bi_sector << 9;
		last_end += last->bi_iter.bi_size;

		/*
		 * we can't merge these if they are from different
		 * devices or if they are not contiguous
		 */
		if (last_end == disk_start && stripe->dev->bdev &&
		    !last->bi_status &&
		    last->bi_disk == stripe->dev->bdev->bd_disk &&
		    last->bi_partno == stripe->dev->bdev->bd_partno) {
			ret = bio_add_page(last, page, PAGE_SIZE, 0);
			if (ret == PAGE_SIZE)
				return 0;
		}
	}

	/* put a new bio on the list */
	bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
	bio->bi_iter.bi_size = 0;
	bio_set_dev(bio, stripe->dev->bdev);
	bio->bi_iter.bi_sector = disk_start >> 9;

	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_list_add(bio_list, bio);
	return 0;
}

/*
 * while we're doing the read/modify/write cycle, we could
 * have errors in reading pages off the disk.  This checks
 * for errors and if we're not able to read the page it'll
 * trigger parity reconstruction.  The rmw will be finished
 * after we've reconstructed the failed stripes
 */
static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
{
	if (rbio->faila >= 0 || rbio->failb >= 0) {
		BUG_ON(rbio->faila == rbio->real_stripes - 1);
		__raid56_parity_recover(rbio);
	} else {
		finish_rmw(rbio);
	}
}

/*
 * helper function to walk our bio list and populate the bio_pages array with
 * the result.  This seems expensive, but it is faster than constantly
 * searching through the bio list as we setup the IO in finish_rmw or stripe
 * reconstruction.
 *
 * This must be called before you trust the answers from page_in_rbio
 */
static void index_rbio_pages(struct btrfs_raid_bio *rbio)
{
	struct bio *bio;
	u64 start;
	unsigned long stripe_offset;
	unsigned long page_index;

	spin_lock_irq(&rbio->bio_list_lock);
	bio_list_for_each(bio, &rbio->bio_list) {
		struct bio_vec bvec;
		struct bvec_iter iter;
		int i = 0;

		start = (u64)bio->bi_iter.bi_sector << 9;
		stripe_offset = start - rbio->bbio->raid_map[0];
		page_index = stripe_offset >> PAGE_SHIFT;

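		/*
		 * a cloned bio's iterator may already be advanced; restore
		 * the iterator saved at clone time so every page is indexed
		 */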
		if (bio_flagged(bio, BIO_CLONED))
			bio->bi_iter = btrfs_io_bio(bio)->iter;

		bio_for_each_segment(bvec, bio, iter) {
			rbio->bio_pages[page_index + i] = bvec.bv_page;
			i++;
		}
	}
	spin_unlock_irq(&rbio->bio_list_lock);
}

/*
 * this is called from one of two situations.  We either
 * have a full stripe from the higher layers, or we've read all
 * the missing bits off disk.
 *
 * This will calculate the parity and then send down any
 * changed blocks.
 */
static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct bio_list bio_list;
	struct bio *bio;
	int ret;

	bio_list_init(&bio_list);

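	/* the last stripe is P for raid5; the last two are P and Q for raid6 */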
	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	/* at this point we either have a full stripe,
	 * or we've read the full stripe from the drive.
	 * recalculate the parity and write the new results.
	 *
	 * We're not allowed to add any new bios to the
	 * bio list here, anyone else that wants to
	 * change this stripe needs to do their own rmw.
	 */
	spin_lock_irq(&rbio->bio_list_lock);
	set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
	spin_unlock_irq(&rbio->bio_list_lock);

	atomic_set(&rbio->error, 0);

	/*
	 * now that we've set rmw_locked, run through the
	 * bio list one last time and map the page pointers
	 *
	 * We don't cache full rbios because we're assuming
	 * the higher layers are unlikely to use this area of
	 * the disk again soon.  If they do use it again,
	 * hopefully they will send another full bio.
	 */
	index_rbio_pages(rbio);
	if (!rbio_is_full(rbio))
		cache_rbio_pages(rbio);
	else
		clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		struct page *p;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		p = rbio_pstripe_page(rbio, pagenr);
		SetPageUptodate(p);
		pointers[stripe++] = kmap(p);

		if (q_stripe != -1) {

			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			p = rbio_qstripe_page(rbio, pagenr);
			SetPageUptodate(p);
			pointers[stripe++] = kmap(p);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}


		for (stripe = 0; stripe < rbio->real_stripes; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
	}

	/*
	 * time to start writing.  Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q.  Ignore
	 * everything else.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list,
				       page, stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

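	/*
	 * if a dev-replace is running, the same writes must also be
	 * duplicated to the replace target devices
	 */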
	if (likely(!bbio->num_tgtdevs))
		goto write_data;

	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (!bbio->tgtdev_map[stripe])
			continue;

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *page;
			if (stripe < rbio->nr_data) {
				page = page_in_rbio(rbio, stripe, pagenr, 1);
				if (!page)
					continue;
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}

			ret = rbio_add_io_page(rbio, &bio_list, page,
					       rbio->bbio->tgtdev_map[stripe],
					       pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

write_data:
	atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
	BUG_ON(atomic_read(&rbio->stripes_pending) == 0);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

/*
 * helper to find the stripe number for a given bio.  Used to figure out which
 * stripe has failed.  This expects the bio to correspond to a physical disk,
 * so it looks up based on physical sector numbers.
 */
static int find_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	u64 physical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;
	struct btrfs_bio_stripe *stripe;

	physical <<= 9;

	for (i = 0; i < rbio->bbio->num_stripes; i++) {
		stripe = &rbio->bbio->stripes[i];
		stripe_start = stripe->physical;
		if (physical >= stripe_start &&
		    physical < stripe_start + rbio->stripe_len &&
		    stripe->dev->bdev &&
		    bio->bi_disk == stripe->dev->bdev->bd_disk &&
		    bio->bi_partno == stripe->dev->bdev->bd_partno) {
			return i;
		}
	}
	return -1;
}

/*
 * helper to find the stripe number for a given
 * bio (before mapping).  Used to figure out which stripe has
 * failed.  This looks up based on logical block numbers.
 */
static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
				   struct bio *bio)
{
	u64 logical = bio->bi_iter.bi_sector;
	u64 stripe_start;
	int i;

	logical <<= 9;

	for (i = 0; i < rbio->nr_data; i++) {
		stripe_start = rbio->bbio->raid_map[i];
		if (logical >= stripe_start &&
		    logical < stripe_start + rbio->stripe_len) {
			return i;
		}
	}
	return -1;
}

/*
 * returns -EIO if we had too many failures
 */
static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rbio->bio_list_lock, flags);

	/* we already know this stripe is bad, move on */
	if (rbio->faila == failed || rbio->failb == failed)
		goto out;

	if (rbio->faila == -1) {
		/* first failure on this rbio */
		rbio->faila = failed;
		atomic_inc(&rbio->error);
	} else if (rbio->failb == -1) {
		/* second failure on this rbio */
		rbio->failb = failed;
		atomic_inc(&rbio->error);
	} else {
		ret = -EIO;
	}
out:
	spin_unlock_irqrestore(&rbio->bio_list_lock, flags);

	return ret;
}

/*
 * helper to fail a stripe based on a physical disk
 * bio.
 */
static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
			   struct bio *bio)
{
	int failed = find_bio_stripe(rbio, bio);

	if (failed < 0)
		return -EIO;

	return fail_rbio_index(rbio, failed);
}

/*
 * this sets each page in the bio uptodate.  It should only be used on private
 * rbio pages, nothing that comes in from the higher layers
 */
static void set_bio_pages_uptodate(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	ASSERT(!bio_flagged(bio, BIO_CLONED));

	bio_for_each_segment_all(bvec, bio, i)
		SetPageUptodate(bvec->bv_page);
}

/*
 * end io for the read phase of the rmw cycle.  All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_rmw once all the bios are read in, but it
 * may trigger parity reconstruction if we had any errors along the way
 */
static void raid_rmw_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	/*
	 * this will normally call finish_rmw to start our write
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_rmw(rbio);
	return;

cleanup:

	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}

static void async_rmw_stripe(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper, rmw_work, NULL, NULL);
	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

static void async_read_rebuild(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			read_rebuild_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

1513 | /* | |
1514 | * the stripe must be locked by the caller. It will | |
1515 | * unlock after all the writes are done | |
1516 | */ | |
1517 | static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) | |
1518 | { | |
1519 | int bios_to_read = 0; | |
53b381b3 DW |
1520 | struct bio_list bio_list; |
1521 | int ret; | |
53b381b3 DW |
1522 | int pagenr; |
1523 | int stripe; | |
1524 | struct bio *bio; | |
1525 | ||
1526 | bio_list_init(&bio_list); | |
1527 | ||
1528 | ret = alloc_rbio_pages(rbio); | |
1529 | if (ret) | |
1530 | goto cleanup; | |
1531 | ||
1532 | index_rbio_pages(rbio); | |
1533 | ||
b89e1b01 | 1534 | atomic_set(&rbio->error, 0); |
53b381b3 DW |
1535 | /* |
1536 | * build a list of bios to read all the missing parts of this | |
1537 | * stripe | |
1538 | */ | |
1539 | for (stripe = 0; stripe < rbio->nr_data; stripe++) { | |
915e2290 | 1540 | for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) { |
53b381b3 DW |
1541 | struct page *page; |
1542 | /* | |
1543 | * we want to find all the pages missing from | |
1544 | * the rbio and read them from the disk. If | |
1545 | * page_in_rbio finds a page in the bio list | |
1546 | * we don't need to read it off the stripe. | |
1547 | */ | |
1548 | page = page_in_rbio(rbio, stripe, pagenr, 1); | |
1549 | if (page) | |
1550 | continue; | |
1551 | ||
1552 | page = rbio_stripe_page(rbio, stripe, pagenr); | |
4ae10b3a CM |
1553 | /* |
1554 | * the bio cache may have handed us an uptodate | |
1555 | * page. If so, be happy and use it | |
1556 | */ | |
1557 | if (PageUptodate(page)) | |
1558 | continue; | |
1559 | ||
53b381b3 DW |
1560 | ret = rbio_add_io_page(rbio, &bio_list, page, |
1561 | stripe, pagenr, rbio->stripe_len); | |
1562 | if (ret) | |
1563 | goto cleanup; | |
1564 | } | |
1565 | } | |
1566 | ||
1567 | bios_to_read = bio_list_size(&bio_list); | |
1568 | if (!bios_to_read) { | |
1569 | /* | |
1570 | * this can happen if others have merged with | |
1571 | * us, it means there is nothing left to read. | |
1572 | * But if there are missing devices it may not be | |
1573 | * safe to do the full stripe write yet. | |
1574 | */ | |
1575 | goto finish; | |
1576 | } | |
1577 | ||
1578 | /* | |
1579 | * the bbio may be freed once we submit the last bio. Make sure | |
1580 | * not to touch it after that | |
1581 | */ | |
b89e1b01 | 1582 | atomic_set(&rbio->stripes_pending, bios_to_read); |
53b381b3 DW |
1583 | while (1) { |
1584 | bio = bio_list_pop(&bio_list); | |
1585 | if (!bio) | |
1586 | break; | |
1587 | ||
1588 | bio->bi_private = rbio; | |
1589 | bio->bi_end_io = raid_rmw_end_io; | |
37226b21 | 1590 | bio_set_op_attrs(bio, REQ_OP_READ, 0); |
53b381b3 | 1591 | |
0b246afa | 1592 | btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56); |
53b381b3 | 1593 | |
4e49ea4a | 1594 | submit_bio(bio); |
53b381b3 DW |
1595 | } |
1596 | /* the actual write will happen once the reads are done */ | |
1597 | return 0; | |
1598 | ||
1599 | cleanup: | |
58efbc9f | 1600 | rbio_orig_end_io(rbio, BLK_STS_IOERR); |
785884fc LB |
1601 | |
1602 | while ((bio = bio_list_pop(&bio_list))) | |
1603 | bio_put(bio); | |
1604 | ||
53b381b3 DW |
1605 | return -EIO; |
1606 | ||
1607 | finish: | |
1608 | validate_rbio_for_rmw(rbio); | |
1609 | return 0; | |
1610 | } | |
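
/*
 * What the read side above feeds: once every data page of the stripe
 * is present (from the bio list, the stripe cache, or the reads just
 * issued), finish_rmw() can recompute parity over each horizontal
 * page-sized strip.  For RAID5 that is simply
 *
 *	P = D0 ^ D1 ^ ... ^ D(nr_data - 1)
 *
 * and for RAID6 the Q syndrome is generated on top, which is why a
 * partial write cannot go down before the missing data pages are read.
 */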

/*
 * if the upper layers pass in a full stripe, we thank them by only allocating
 * enough pages to hold the parity, and sending it all down quickly.
 */
static int full_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = alloc_rbio_parity_pages(rbio);
	if (ret) {
		__free_raid_bio(rbio);
		return ret;
	}

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		finish_rmw(rbio);
	return 0;
}

/*
 * partial stripe writes get handed over to async helpers.
 * We're really hoping to merge a few more writes into this
 * rbio before calculating new parity
 */
static int partial_stripe_write(struct btrfs_raid_bio *rbio)
{
	int ret;

	ret = lock_stripe_add(rbio);
	if (ret == 0)
		async_rmw_stripe(rbio);
	return 0;
}

/*
 * sometimes while we were reading from the drive to
 * recalculate parity, enough new bios come in to create
 * a full stripe. So we do a check here to see if we can
 * go directly to finish_rmw
 */
static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
{
	/* head off into rmw land if we don't have a full stripe */
	if (!rbio_is_full(rbio))
		return partial_stripe_write(rbio);
	return full_stripe_write(rbio);
}

/*
 * We use plugging call backs to collect full stripes.
 * Any time we get a partial stripe write while plugged
 * we collect it into a list. When the unplug comes down,
 * we sort the list by logical block number and merge
 * everything we can into the same rbios
 */
struct btrfs_plug_cb {
	struct blk_plug_cb cb;
	struct btrfs_fs_info *info;
	struct list_head rbio_list;
	struct btrfs_work work;
};

/*
 * rbios on the plug list are sorted for easier merging.
 */
static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
						 plug_list);
	struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
						 plug_list);
	u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
	u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;

	if (a_sector < b_sector)
		return -1;
	if (a_sector > b_sector)
		return 1;
	return 0;
}

static void run_plug(struct btrfs_plug_cb *plug)
{
	struct btrfs_raid_bio *cur;
	struct btrfs_raid_bio *last = NULL;

	/*
	 * sort our plug list then try to merge
	 * everything we can in hopes of creating full
	 * stripes.
	 */
	list_sort(NULL, &plug->rbio_list, plug_cmp);
	while (!list_empty(&plug->rbio_list)) {
		cur = list_entry(plug->rbio_list.next,
				 struct btrfs_raid_bio, plug_list);
		list_del_init(&cur->plug_list);

		if (rbio_is_full(cur)) {
			/* we have a full stripe, send it down */
			full_stripe_write(cur);
			continue;
		}
		if (last) {
			if (rbio_can_merge(last, cur)) {
				merge_rbio(last, cur);
				__free_raid_bio(cur);
				continue;
			}
			__raid56_parity_write(last);
		}
		last = cur;
	}
	if (last)
		__raid56_parity_write(last);
	kfree(plug);
}
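
/*
 * A sketch of what run_plug() buys us, assuming 2 data + 1 parity
 * devices and 64K stripe_len (so 128K full stripes): three plugged
 * partial writes covering 0-64K, 64K-128K and 128K-192K sort into
 * that order; the first two share a full stripe, so merge_rbio()
 * folds them into one rbio that __raid56_parity_write() can send
 * straight down the full_stripe_write() path, while the third stays
 * partial and pays for a read-modify-write cycle.
 */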

/*
 * if the unplug comes from schedule, we have to push the
 * work off to a helper thread
 */
static void unplug_work(struct btrfs_work *work)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(work, struct btrfs_plug_cb, work);
	run_plug(plug);
}

static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
{
	struct btrfs_plug_cb *plug;
	plug = container_of(cb, struct btrfs_plug_cb, cb);

	if (from_schedule) {
		btrfs_init_work(&plug->work, btrfs_rmw_helper,
				unplug_work, NULL, NULL);
		btrfs_queue_work(plug->info->rmw_workers,
				 &plug->work);
		return;
	}
	run_plug(plug);
}

/*
 * our main entry point for writes from the rest of the FS.
 */
int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
			struct btrfs_bio *bbio, u64 stripe_len)
{
	struct btrfs_raid_bio *rbio;
	struct btrfs_plug_cb *plug = NULL;
	struct blk_plug_cb *cb;
	int ret;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;
	rbio->operation = BTRFS_RBIO_WRITE;

	btrfs_bio_counter_inc_noblocked(fs_info);
	rbio->generic_bio_cnt = 1;

	/*
	 * don't plug on full rbios, just get them out the door
	 * as quickly as we can
	 */
	if (rbio_is_full(rbio)) {
		ret = full_stripe_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
		return ret;
	}

	cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
	if (cb) {
		plug = container_of(cb, struct btrfs_plug_cb, cb);
		if (!plug->info) {
			plug->info = fs_info;
			INIT_LIST_HEAD(&plug->rbio_list);
		}
		list_add_tail(&rbio->plug_list, &plug->rbio_list);
		ret = 0;
	} else {
		ret = __raid56_parity_write(rbio);
		if (ret)
			btrfs_bio_counter_dec(fs_info);
	}
	return ret;
}
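
/*
 * The plug above is armed by the submitter, roughly (a sketch, not a
 * verbatim caller):
 *
 *	struct blk_plug plug;
 *
 *	blk_start_plug(&plug);
 *	... submit a batch of writes that reach raid56_parity_write() ...
 *	blk_finish_plug(&plug);
 *
 * blk_check_plugged() allocates one btrfs_plug_cb per plug and hooks
 * btrfs_raid_unplug() into it, so the partial rbios collected on
 * plug->rbio_list are sorted and merged in one go when the plug is
 * flushed.
 */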

/*
 * all parity reconstruction happens here. We've read in everything
 * we can find from the drives and this does the heavy lifting of
 * sorting the good from the bad.
 */
static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
{
	int pagenr, stripe;
	void **pointers;
	int faila = -1, failb = -1;
	struct page *page;
	blk_status_t err;
	int i;

	pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
	if (!pointers) {
		err = BLK_STS_RESOURCE;
		goto cleanup_io;
	}

	faila = rbio->faila;
	failb = rbio->failb;

	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		spin_lock_irq(&rbio->bio_list_lock);
		set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
		spin_unlock_irq(&rbio->bio_list_lock);
	}

	index_rbio_pages(rbio);

	for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
		/*
		 * Now we just use bitmap to mark the horizontal stripes in
		 * which we have data when doing parity scrub.
		 */
		if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
		    !test_bit(pagenr, rbio->dbitmap))
			continue;

		/* setup our array of pointers with pages
		 * from each stripe
		 */
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			pointers[stripe] = kmap(page);
		}

		/* all raid6 handling here */
		if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
			/*
			 * single failure, rebuild from parity raid5
			 * style
			 */
			if (failb < 0) {
				if (faila == rbio->nr_data) {
					/*
					 * Just the P stripe has failed, without
					 * a bad data or Q stripe.
					 * TODO, we should redo the xor here.
					 */
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * a single failure in raid6 is rebuilt
				 * in the pstripe code below
				 */
				goto pstripe;
			}

			/* make sure our ps and qs are in order */
			if (faila > failb) {
				int tmp = failb;
				failb = faila;
				faila = tmp;
			}

			/* if the q stripe has failed, do a pstripe reconstruction
			 * from the xors.
			 * If both the q stripe and the P stripe have failed, we're
			 * here due to a crc mismatch and we can't give them the
			 * data they want
			 */
			if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
				if (rbio->bbio->raid_map[faila] ==
				    RAID5_P_STRIPE) {
					err = BLK_STS_IOERR;
					goto cleanup;
				}
				/*
				 * otherwise we have one bad data stripe and
				 * a good P stripe. raid5!
				 */
				goto pstripe;
			}

			if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
				raid6_datap_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, pointers);
			} else {
				raid6_2data_recov(rbio->real_stripes,
						  PAGE_SIZE, faila, failb,
						  pointers);
			}
		} else {
			void *p;

			/* rebuild from P stripe here (raid5 or raid6) */
			BUG_ON(failb != -1);
pstripe:
			/* Copy parity block into failed block to start with */
			memcpy(pointers[faila],
			       pointers[rbio->nr_data],
			       PAGE_SIZE);

			/* rearrange the pointer array */
			p = pointers[faila];
			for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
				pointers[stripe] = pointers[stripe + 1];
			pointers[rbio->nr_data - 1] = p;

			/* xor in the rest */
			run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
		}
		/* if we're doing this rebuild as part of an rmw, go through
		 * and set all of our private rbio pages in the
		 * failed stripes as uptodate. This way finish_rmw will
		 * know they can be trusted. If this was a read reconstruction,
		 * other endio functions will fiddle the uptodate bits
		 */
		if (rbio->operation == BTRFS_RBIO_WRITE) {
			for (i = 0; i < rbio->stripe_npages; i++) {
				if (faila != -1) {
					page = rbio_stripe_page(rbio, faila, i);
					SetPageUptodate(page);
				}
				if (failb != -1) {
					page = rbio_stripe_page(rbio, failb, i);
					SetPageUptodate(page);
				}
			}
		}
		for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
			/*
			 * if we're rebuilding a read, we have to use
			 * pages from the bio list
			 */
			if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
			     rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
			    (stripe == faila || stripe == failb)) {
				page = page_in_rbio(rbio, stripe, pagenr, 0);
			} else {
				page = rbio_stripe_page(rbio, stripe, pagenr);
			}
			kunmap(page);
		}
	}

	err = BLK_STS_OK;
cleanup:
	kfree(pointers);

cleanup_io:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
		if (err == BLK_STS_OK)
			cache_rbio_pages(rbio);
		else
			clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

		rbio_orig_end_io(rbio, err);
	} else if (rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
		rbio_orig_end_io(rbio, err);
	} else if (err == BLK_STS_OK) {
		rbio->faila = -1;
		rbio->failb = -1;

		if (rbio->operation == BTRFS_RBIO_WRITE)
			finish_rmw(rbio);
		else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
			finish_parity_scrub(rbio, 0);
		else
			BUG();
	} else {
		rbio_orig_end_io(rbio, err);
	}
}
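
/*
 * The algebra behind the branches above, in the standard Linux raid6
 * layout (g is the {02} generator of GF(2^8), applied per byte within
 * each horizontal strip):
 *
 *	P = D0 ^ D1 ^ ... ^ D(n-1)
 *	Q = g^0*D0 ^ g^1*D1 ^ ... ^ g^(n-1)*D(n-1)
 *
 * A single lost data stripe is re-xored from P (the pstripe: path,
 * shared with raid5).  Lost data + P goes through raid6_datap_recov(),
 * which solves the Q equation for the data and then redoes the xor
 * for P.  Two lost data stripes go through raid6_2data_recov(), which
 * solves the two equations for the two unknowns.  Lost P + Q leaves
 * no equation that yields the requested data (we only get there on a
 * crc mismatch), so the rbio fails, as does a lone lost P in the
 * raid6 branch until the TODO above redoes the xor.
 */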

/*
 * This is called only for stripes we've read from disk to
 * reconstruct the parity.
 */
static void raid_recover_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	/*
	 * we only read stripe pages off the disk, set them
	 * up to date if there were no errors
	 */
	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);
	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);
	else
		__raid_recover_end_io(rbio);
}

/*
 * reads everything we need off the disk to reconstruct
 * the parity. endio handlers trigger final reconstruction
 * when the IO is done.
 *
 * This is used both for reads from the higher layers and for
 * parity construction required to finish a rmw cycle.
 */
static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);

	/*
	 * read everything that hasn't failed. Thanks to the
	 * stripe cache, it is possible that some or all of these
	 * pages are going to be uptodate.
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		if (rbio->faila == stripe || rbio->failb == stripe) {
			atomic_inc(&rbio->error);
			continue;
		}

		for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
			struct page *p;

			/*
			 * the rmw code may have already read this
			 * page in
			 */
			p = rbio_stripe_page(rbio, stripe, pagenr);
			if (PageUptodate(p))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list,
				       rbio_stripe_page(rbio, stripe, pagenr),
				       stripe, pagenr, rbio->stripe_len);
			if (ret < 0)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * we might have no bios to read just because the pages
		 * were up to date, or we might have no bios to read because
		 * the devices were gone.
		 */
		if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
			__raid_recover_end_io(rbio);
			goto out;
		} else {
			goto cleanup;
		}
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_recover_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
out:
	return 0;

cleanup:
	if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
	    rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
		rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return -EIO;
}

/*
 * the main entry point for reads from the higher layers. This
 * is really only called when the normal read path had a failure,
 * so we assume the bio they send down corresponds to a failed part
 * of the drive.
 */
int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 stripe_len,
			  int mirror_num, int generic_io)
{
	struct btrfs_raid_bio *rbio;
	int ret;

	if (generic_io) {
		ASSERT(bbio->mirror_num == mirror_num);
		btrfs_io_bio(bio)->mirror_num = mirror_num;
	}

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio)) {
		if (generic_io)
			btrfs_put_bbio(bbio);
		return PTR_ERR(rbio);
	}

	rbio->operation = BTRFS_RBIO_READ_REBUILD;
	bio_list_add(&rbio->bio_list, bio);
	rbio->bio_list_bytes = bio->bi_iter.bi_size;

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		btrfs_warn(fs_info,
	"%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
			   __func__, (u64)bio->bi_iter.bi_sector << 9,
			   (u64)bio->bi_iter.bi_size, bbio->map_type);
		if (generic_io)
			btrfs_put_bbio(bbio);
		kfree(rbio);
		return -EIO;
	}

	if (generic_io) {
		btrfs_bio_counter_inc_noblocked(fs_info);
		rbio->generic_bio_cnt = 1;
	} else {
		btrfs_get_bbio(bbio);
	}

	/*
	 * Loop retry:
	 * for 'mirror == 2', reconstruct from all other stripes.
	 * for 'mirror_num > 2', select a stripe to fail on every retry.
	 */
	if (mirror_num > 2) {
		/*
		 * 'mirror == 3' is to fail the p stripe and
		 * reconstruct from the q stripe. 'mirror > 3' is to
		 * fail a data stripe and reconstruct from p+q stripe.
		 */
		rbio->failb = rbio->real_stripes - (mirror_num - 1);
		ASSERT(rbio->failb > 0);
		if (rbio->failb <= rbio->faila)
			rbio->failb--;
	}

	ret = lock_stripe_add(rbio);

	/*
	 * __raid56_parity_recover will end the bio with
	 * any errors it hits. We don't want to return
	 * its error value up the stack because our caller
	 * will end up calling bio_endio with any nonzero
	 * return
	 */
	if (ret == 0)
		__raid56_parity_recover(rbio);
	/*
	 * our rbio has been added to the list of
	 * rbios that will be handled after the
	 * current lock owner is done
	 */
	return 0;
}
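
/*
 * Example of the retry mapping above, for a 4-device RAID6 layout
 * (2 data + P + Q, real_stripes == 4, data at indexes 0-1, P at 2,
 * Q at 3): mirror_num == 2 just rebuilds the bad stripe from all the
 * others, while mirror_num == 3 sets failb = 4 - (3 - 1) = 2, i.e.
 * additionally fails the P stripe so reconstruction must come from Q.
 * If failb lands on or below faila it is shifted down one, keeping
 * the two injected failures distinct.
 */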

static void rmw_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_rmw_stripe(rbio);
}

static void read_rebuild_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

/*
 * The following code is used to scrub/replace the parity stripe
 *
 * Caller must have already increased bio_counter for getting @bbio.
 *
 * Note: We need to make sure that all the pages added to the
 * scrub/replace raid bio are correct and will not be changed during the
 * scrub/replace; that is, those pages just hold metadata or file data
 * with checksums.
 */

struct btrfs_raid_bio *
raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			       struct btrfs_bio *bbio, u64 stripe_len,
			       struct btrfs_device *scrub_dev,
			       unsigned long *dbitmap, int stripe_nsectors)
{
	struct btrfs_raid_bio *rbio;
	int i;

	rbio = alloc_rbio(fs_info, bbio, stripe_len);
	if (IS_ERR(rbio))
		return NULL;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);
	rbio->operation = BTRFS_RBIO_PARITY_SCRUB;

	/*
	 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
	 * to the end position, so this search can start from the first parity
	 * stripe.
	 */
	for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
		if (bbio->stripes[i].dev == scrub_dev) {
			rbio->scrubp = i;
			break;
		}
	}
	ASSERT(i < rbio->real_stripes);

	/* We currently only support the case where sectorsize == PAGE_SIZE */
	ASSERT(fs_info->sectorsize == PAGE_SIZE);
	ASSERT(rbio->stripe_npages == stripe_nsectors);
	bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);

	/*
	 * We have already increased bio_counter when getting bbio, record it
	 * so we can free it at rbio_orig_end_io().
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

/* Used for both parity scrub and missing. */
void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
			    u64 logical)
{
	int stripe_offset;
	int index;

	ASSERT(logical >= rbio->bbio->raid_map[0]);
	ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
				rbio->stripe_len * rbio->nr_data);
	stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
	index = stripe_offset >> PAGE_SHIFT;
	rbio->bio_pages[index] = page;
}
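
/*
 * Example of the index math above, assuming 4K pages and a 64K
 * stripe_len with two data stripes: a page at logical
 * raid_map[0] + 68K has stripe_offset == 68K, so
 * index = 68K >> PAGE_SHIFT == 17, i.e. the second page of the second
 * data stripe in the flat bio_pages[] array (indexes 0-15 belong to
 * the first data stripe, 16-31 to the second).
 */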

/*
 * We only scrub the parity for the horizontal stripes where we have
 * correct data, so we don't need to allocate pages for all the stripes.
 */
static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
{
	int i;
	int bit;
	int index;
	struct page *page;

	for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
		for (i = 0; i < rbio->real_stripes; i++) {
			index = i * rbio->stripe_npages + bit;
			if (rbio->stripe_pages[index])
				continue;

			page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
			if (!page)
				return -ENOMEM;
			rbio->stripe_pages[index] = page;
		}
	}
	return 0;
}
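
/*
 * stripe_pages[] above is stripe-major: stripe i owns indexes
 * [i * stripe_npages, (i + 1) * stripe_npages), so the page for
 * horizontal position 'bit' of stripe i lives at
 * i * stripe_npages + bit.  Only positions set in dbitmap get pages;
 * the rest of the stripe is never read or written by the scrub.
 */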

static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
					 int need_check)
{
	struct btrfs_bio *bbio = rbio->bbio;
	void *pointers[rbio->real_stripes];
	DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
	int nr_data = rbio->nr_data;
	int stripe;
	int pagenr;
	int p_stripe = -1;
	int q_stripe = -1;
	struct page *p_page = NULL;
	struct page *q_page = NULL;
	struct bio_list bio_list;
	struct bio *bio;
	int is_replace = 0;
	int ret;

	bio_list_init(&bio_list);

	if (rbio->real_stripes - rbio->nr_data == 1) {
		p_stripe = rbio->real_stripes - 1;
	} else if (rbio->real_stripes - rbio->nr_data == 2) {
		p_stripe = rbio->real_stripes - 2;
		q_stripe = rbio->real_stripes - 1;
	} else {
		BUG();
	}

	if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
		is_replace = 1;
		bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
	}

	/*
	 * The higher layers (scrubber) are unlikely to use this area of
	 * the disk again soon, so don't cache it.
	 */
	clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);

	if (!need_check)
		goto writeback;

	p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
	if (!p_page)
		goto cleanup;
	SetPageUptodate(p_page);

	if (q_stripe != -1) {
		q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
		if (!q_page) {
			__free_page(p_page);
			goto cleanup;
		}
		SetPageUptodate(q_page);
	}

	atomic_set(&rbio->error, 0);

	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *p;
		void *parity;
		/* first collect one page from each data stripe */
		for (stripe = 0; stripe < nr_data; stripe++) {
			p = page_in_rbio(rbio, stripe, pagenr, 0);
			pointers[stripe] = kmap(p);
		}

		/* then add the parity stripe */
		pointers[stripe++] = kmap(p_page);

		if (q_stripe != -1) {
			/*
			 * raid6, add the qstripe and call the
			 * library function to fill in our p/q
			 */
			pointers[stripe++] = kmap(q_page);

			raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
						pointers);
		} else {
			/* raid5 */
			memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
			run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
		}

		/* Check scrubbing parity and repair it */
		p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		parity = kmap(p);
		if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
			memcpy(parity, pointers[rbio->scrubp], PAGE_SIZE);
		else
			/* Parity is right, needn't writeback */
			bitmap_clear(rbio->dbitmap, pagenr, 1);
		kunmap(p);

		for (stripe = 0; stripe < nr_data; stripe++)
			kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
		kunmap(p_page);
		/* the q stripe page was mapped above too; drop that mapping */
		if (q_page)
			kunmap(q_page);
	}

	__free_page(p_page);
	if (q_page)
		__free_page(q_page);

writeback:
	/*
	 * time to start writing. Make bios for everything from the
	 * higher layers (the bio_list in our rbio) and our p/q. Ignore
	 * everything else.
	 */
	for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list,
			       page, rbio->scrubp, pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

	if (!is_replace)
		goto submit_write;

	for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
		struct page *page;

		page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
		ret = rbio_add_io_page(rbio, &bio_list, page,
				       bbio->tgtdev_map[rbio->scrubp],
				       pagenr, rbio->stripe_len);
		if (ret)
			goto cleanup;
	}

submit_write:
	nr_data = bio_list_size(&bio_list);
	if (!nr_data) {
		/* Every parity is right */
		rbio_orig_end_io(rbio, BLK_STS_OK);
		return;
	}

	atomic_set(&rbio->stripes_pending, nr_data);

	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid_write_end_io;
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		submit_bio(bio);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);
}

static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
{
	if (stripe >= 0 && stripe < rbio->nr_data)
		return 1;
	return 0;
}

/*
 * While we're doing the parity check and repair, we could have errors
 * in reading pages off the disk. This checks for errors and if we're
 * not able to read the page it'll trigger parity reconstruction. The
 * parity scrub will be finished after we've reconstructed the failed
 * stripes
 */
static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
{
	if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
		goto cleanup;

	if (rbio->faila >= 0 || rbio->failb >= 0) {
		int dfail = 0, failp = -1;

		if (is_data_stripe(rbio, rbio->faila))
			dfail++;
		else if (is_parity_stripe(rbio->faila))
			failp = rbio->faila;

		if (is_data_stripe(rbio, rbio->failb))
			dfail++;
		else if (is_parity_stripe(rbio->failb))
			failp = rbio->failb;

		/*
		 * Because we cannot use the parity that is under scrub to
		 * repair the data, our repair capability is reduced by one
		 * (in the case of RAID5, we cannot repair anything).
		 */
		if (dfail > rbio->bbio->max_errors - 1)
			goto cleanup;

		/*
		 * If all the data is good, only the parity is wrong; just
		 * repair the parity.
		 */
		if (dfail == 0) {
			finish_parity_scrub(rbio, 0);
			return;
		}

		/*
		 * Getting here means we have one corrupted data stripe and
		 * one corrupted parity on RAID6. If the corrupted parity
		 * is the one being scrubbed, we can luckily use the other
		 * parity to repair the data; otherwise, we cannot repair
		 * the data stripe.
		 */
		if (failp != rbio->scrubp)
			goto cleanup;

		__raid_recover_end_io(rbio);
	} else {
		finish_parity_scrub(rbio, 1);
	}
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);
}
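
/*
 * The decision above, spelled out (scrubp is the parity under scrub,
 * which can never be trusted as a repair source; max_errors is 1 for
 * RAID5 and 2 for RAID6):
 *
 *	no failed stripes                   -> verify and repair parity
 *	only parity stripe(s) failed        -> rewrite parity, skip the check
 *	data failures > max_errors - 1      -> fail the rbio
 *	one data stripe + scrubp failed     -> rebuild from the other parity
 *	one data stripe failed otherwise    -> rebuilding would need scrubp,
 *					       fail the rbio
 */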

/*
 * end io for the read phase of the rmw cycle. All the bios here are physical
 * stripe bios we've read from the disk so we can recalculate the parity of the
 * stripe.
 *
 * This will usually kick off finish_parity_scrub once all the bios are read
 * in, but it may trigger parity reconstruction if we had any errors along
 * the way
 */
static void raid56_parity_scrub_end_io(struct bio *bio)
{
	struct btrfs_raid_bio *rbio = bio->bi_private;

	if (bio->bi_status)
		fail_bio_stripe(rbio, bio);
	else
		set_bio_pages_uptodate(bio);

	bio_put(bio);

	if (!atomic_dec_and_test(&rbio->stripes_pending))
		return;

	/*
	 * this will normally call finish_parity_scrub to start our write,
	 * but if there are any failed stripes we'll reconstruct
	 * from parity first
	 */
	validate_rbio_for_parity_scrub(rbio);
}

static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
{
	int bios_to_read = 0;
	struct bio_list bio_list;
	int ret;
	int pagenr;
	int stripe;
	struct bio *bio;

	bio_list_init(&bio_list);

	ret = alloc_rbio_essential_pages(rbio);
	if (ret)
		goto cleanup;

	atomic_set(&rbio->error, 0);
	/*
	 * build a list of bios to read all the missing parts of this
	 * stripe
	 */
	for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
		for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
			struct page *page;
			/*
			 * we want to find all the pages missing from
			 * the rbio and read them from the disk. If
			 * page_in_rbio finds a page in the bio list
			 * we don't need to read it off the stripe.
			 */
			page = page_in_rbio(rbio, stripe, pagenr, 1);
			if (page)
				continue;

			page = rbio_stripe_page(rbio, stripe, pagenr);
			/*
			 * the bio cache may have handed us an uptodate
			 * page. If so, be happy and use it
			 */
			if (PageUptodate(page))
				continue;

			ret = rbio_add_io_page(rbio, &bio_list, page,
				       stripe, pagenr, rbio->stripe_len);
			if (ret)
				goto cleanup;
		}
	}

	bios_to_read = bio_list_size(&bio_list);
	if (!bios_to_read) {
		/*
		 * this can happen if others have merged with
		 * us; it means there is nothing left to read.
		 * But if there are missing devices it may not be
		 * safe to do the full stripe write yet.
		 */
		goto finish;
	}

	/*
	 * the bbio may be freed once we submit the last bio. Make sure
	 * not to touch it after that
	 */
	atomic_set(&rbio->stripes_pending, bios_to_read);
	while (1) {
		bio = bio_list_pop(&bio_list);
		if (!bio)
			break;

		bio->bi_private = rbio;
		bio->bi_end_io = raid56_parity_scrub_end_io;
		bio_set_op_attrs(bio, REQ_OP_READ, 0);

		btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);

		submit_bio(bio);
	}
	/* the actual write will happen once the reads are done */
	return;

cleanup:
	rbio_orig_end_io(rbio, BLK_STS_IOERR);

	while ((bio = bio_list_pop(&bio_list)))
		bio_put(bio);

	return;

finish:
	validate_rbio_for_parity_scrub(rbio);
}

static void scrub_parity_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	raid56_parity_scrub_stripe(rbio);
}

static void async_scrub_parity(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			scrub_parity_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);
}

/* The following code is used for dev replace of a missing RAID 5/6 device. */

struct btrfs_raid_bio *
raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
			  struct btrfs_bio *bbio, u64 length)
{
	struct btrfs_raid_bio *rbio;

	rbio = alloc_rbio(fs_info, bbio, length);
	if (IS_ERR(rbio))
		return NULL;

	rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
	bio_list_add(&rbio->bio_list, bio);
	/*
	 * This is a special bio which is used to hold the completion handler
	 * and make the scrub rbio similar to the other types
	 */
	ASSERT(!bio->bi_iter.bi_size);

	rbio->faila = find_logical_bio_stripe(rbio, bio);
	if (rbio->faila == -1) {
		BUG();
		kfree(rbio);
		return NULL;
	}

	/*
	 * When we get bbio, we have already increased bio_counter, record it
	 * so we can free it at rbio_orig_end_io()
	 */
	rbio->generic_bio_cnt = 1;

	return rbio;
}

static void missing_raid56_work(struct btrfs_work *work)
{
	struct btrfs_raid_bio *rbio;

	rbio = container_of(work, struct btrfs_raid_bio, work);
	__raid56_parity_recover(rbio);
}

static void async_missing_raid56(struct btrfs_raid_bio *rbio)
{
	btrfs_init_work(&rbio->work, btrfs_rmw_helper,
			missing_raid56_work, NULL, NULL);

	btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
}

void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
{
	if (!lock_stripe_add(rbio))
		async_missing_raid56(rbio);
}