// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
#include "block-group.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on the tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the same 2 started one after another.
 */

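/*
 * A minimal usage sketch of the public interface (illustrative only; the
 * key range chosen below is an assumption, not taken from a real caller):
 *
 *	struct btrfs_key key_start = { 0 };
 *	struct btrfs_key key_end = {
 *		.objectid = (u64)-1,
 *		.type = (u8)-1,
 *		.offset = (u64)-1,
 *	};
 *	struct reada_control *rc;
 *
 *	rc = btrfs_reada_add(root, &key_start, &key_end);
 *	if (!IS_ERR(rc))
 *		btrfs_reada_wait(rc);	(or btrfs_reada_detach(rc))
 */
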
#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head list;
	struct reada_control *rc;
	u64 generation;
};

struct reada_extent {
	u64 logical;
	u64 owner_root;
	struct btrfs_key top;
	struct list_head extctl;
	int refcnt;
	spinlock_t lock;
	struct reada_zone *zones[BTRFS_MAX_MIRRORS];
	int nzones;
	int scheduled;
	int level;
};

struct reada_zone {
	u64 start;
	u64 end;
	u64 elems;
	struct list_head list;
	spinlock_t lock;
	int locked;
	struct btrfs_device *device;
	struct btrfs_device *devs[BTRFS_MAX_MIRRORS]; /* full list, incl self */
	int ndevs;
	struct kref refcnt;
};

struct reada_machine_work {
	struct btrfs_work work;
	struct btrfs_fs_info *fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 owner_root,
			   u64 generation, int level);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case: the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just clean up our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. As a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key,
						btrfs_header_owner(eb), n_gen,
						btrfs_header_level(eb) - 1);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}

	return;
}

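/*
 * A worked example of the range check in __readahead_hook() above (values
 * invented for illustration): with rc->key_start = (256,0,0) and
 * rc->key_end = (257,0,0), a child slot covering keys
 * [key, next_key) = [(100,..), (300,..)) is enqueued, because
 * key < key_end and next_key > key_start, i.e. the slot's key range
 * intersects the requested [key_start, key_end) window.
 */
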
int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->start;
	end = start + cache->length - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}

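/*
 * Note on the lookup scheme above: zones are indexed by their last page
 * (zone->end >> PAGE_SHIFT), so a gang lookup starting at
 * logical >> PAGE_SHIFT returns the first zone whose end is at or after
 * the given logical address; the explicit start/end comparison then
 * verifies that the address really falls inside that zone.
 */
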
static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top,
					      u64 owner_root, int level)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;
	re->owner_root = owner_root;
	re->level = level;

	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* Insert extent in reada tree + all per-device trees, all or nothing */
	down_read(&fs_info->dev_replace.rwsem);
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (test_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state))
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			up_read(&fs_info->dev_replace.rwsem);
			goto error;
		}
		have_zone = 1;
	}
	if (!have_zone)
		radix_tree_delete(&fs_info->reada_tree, index);
	spin_unlock(&fs_info->reada_lock);
	up_read(&fs_info->dev_replace.rwsem);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

495
496 static void reada_extent_put(struct btrfs_fs_info *fs_info,
497 struct reada_extent *re)
498 {
499 int i;
500 unsigned long index = re->logical >> PAGE_SHIFT;
501
502 spin_lock(&fs_info->reada_lock);
503 if (--re->refcnt) {
504 spin_unlock(&fs_info->reada_lock);
505 return;
506 }
507
508 radix_tree_delete(&fs_info->reada_tree, index);
509 for (i = 0; i < re->nzones; ++i) {
510 struct reada_zone *zone = re->zones[i];
511
512 radix_tree_delete(&zone->device->reada_extents, index);
513 }
514
515 spin_unlock(&fs_info->reada_lock);
516
517 for (i = 0; i < re->nzones; ++i) {
518 struct reada_zone *zone = re->zones[i];
519
520 kref_get(&zone->refcnt);
521 spin_lock(&zone->lock);
522 --zone->elems;
523 if (zone->elems == 0) {
524 /* no fs_info->reada_lock needed, as this can't be
525 * the last ref */
526 kref_put(&zone->refcnt, reada_zone_release);
527 }
528 spin_unlock(&zone->lock);
529
530 spin_lock(&fs_info->reada_lock);
531 kref_put(&zone->refcnt, reada_zone_release);
532 spin_unlock(&fs_info->reada_lock);
533 }
534
535 kfree(re);
536 }
537
538 static void reada_zone_release(struct kref *kref)
539 {
540 struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);
541
542 lockdep_assert_held(&zone->device->fs_info->reada_lock);
543
544 radix_tree_delete(&zone->device->reada_zones,
545 zone->end >> PAGE_SHIFT);
546
547 kfree(zone);
548 }
549
550 static void reada_control_release(struct kref *kref)
551 {
552 struct reada_control *rc = container_of(kref, struct reada_control,
553 refcnt);
554
555 kfree(rc);
556 }
557
558 static int reada_add_block(struct reada_control *rc, u64 logical,
559 struct btrfs_key *top, u64 owner_root,
560 u64 generation, int level)
561 {
562 struct btrfs_fs_info *fs_info = rc->fs_info;
563 struct reada_extent *re;
564 struct reada_extctl *rec;
565
566 /* takes one ref */
567 re = reada_find_extent(fs_info, logical, top, owner_root, level);
568 if (!re)
569 return -1;
570
571 rec = kzalloc(sizeof(*rec), GFP_KERNEL);
572 if (!rec) {
573 reada_extent_put(fs_info, re);
574 return -ENOMEM;
575 }
576
577 rec->rc = rc;
578 rec->generation = generation;
579 atomic_inc(&rc->elems);
580
581 spin_lock(&re->lock);
582 list_add_tail(&rec->list, &re->extctl);
583 spin_unlock(&re->lock);
584
585 /* leave the ref on the extent */
586
587 return 0;
588 }
589
590 /*
591 * called with fs_info->reada_lock held
592 */
593 static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
594 {
595 int i;
596 unsigned long index = zone->end >> PAGE_SHIFT;
597
598 for (i = 0; i < zone->ndevs; ++i) {
599 struct reada_zone *peer;
600 peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
601 if (peer && peer->device != zone->device)
602 peer->locked = lock;
603 }
604 }
605
606 /*
607 * called with fs_info->reada_lock held
608 */
609 static int reada_pick_zone(struct btrfs_device *dev)
610 {
611 struct reada_zone *top_zone = NULL;
612 struct reada_zone *top_locked_zone = NULL;
613 u64 top_elems = 0;
614 u64 top_locked_elems = 0;
615 unsigned long index = 0;
616 int ret;
617
618 if (dev->reada_curr_zone) {
619 reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
620 kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
621 dev->reada_curr_zone = NULL;
622 }
623 /* pick the zone with the most elements */
624 while (1) {
625 struct reada_zone *zone;
626
627 ret = radix_tree_gang_lookup(&dev->reada_zones,
628 (void **)&zone, index, 1);
629 if (ret == 0)
630 break;
631 index = (zone->end >> PAGE_SHIFT) + 1;
632 if (zone->locked) {
633 if (zone->elems > top_locked_elems) {
634 top_locked_elems = zone->elems;
635 top_locked_zone = zone;
636 }
637 } else {
638 if (zone->elems > top_elems) {
639 top_elems = zone->elems;
640 top_zone = zone;
641 }
642 }
643 }
644 if (top_zone)
645 dev->reada_curr_zone = top_zone;
646 else if (top_locked_zone)
647 dev->reada_curr_zone = top_locked_zone;
648 else
649 return 0;
650
651 dev->reada_next = dev->reada_curr_zone->start;
652 kref_get(&dev->reada_curr_zone->refcnt);
653 reada_peer_zones_set_lock(dev->reada_curr_zone, 1);
654
655 return 1;
656 }
657
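/*
 * Illustration of the heuristic above (numbers invented): if a device has
 * zones A (elems=8, locked by a peer) and B (elems=3, unlocked), B is
 * chosen, since unlocked zones are preferred; only when every candidate
 * zone is locked does the device fall back to the fullest locked one (A).
 */
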
static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
				    u64 owner_root, int level, int mirror_num,
				    struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr, owner_root, level);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer_stale(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, re->owner_root,
				       re->level, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(rmw->fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&rmw->fs_info->reada_works_cnt);

	kfree(rmw);
}

/* Try to start up to 10k READA requests for a group of devices */
static int reada_start_for_fsdevs(struct btrfs_fs_devices *fs_devices)
{
	u64 enqueued;
	u64 total = 0;
	struct btrfs_device *device;

	do {
		enqueued = 0;
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		total += enqueued;
	} while (enqueued && total < 10000);

	return total;
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
	int i;
	u64 enqueued = 0;

	mutex_lock(&fs_devices->device_list_mutex);

	enqueued += reada_start_for_fsdevs(fs_devices);
	list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list)
		enqueued += reada_start_for_fsdevs(seed_devs);

	mutex_unlock(&fs_devices->device_list_mutex);
	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we limit the loop above to 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			    atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;

			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug(" zone %llu-%llu elems %llu locked %d devs",
				 zone->start, zone->end, zone->elems,
				 zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug(" re: logical %llu size %u empty %d scheduled %d",
				 re->logical, fs_info->nodesize,
				 list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			 re->logical, fs_info->nodesize,
			 list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	int level;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	level = btrfs_header_level(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, root->root_key.objectid,
			      generation, level);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}

/*
 * Before removing a device (device replace or device remove ioctls), call this
 * function to wait for all existing readahead requests on the device and to
 * make sure no one queues more readahead requests for the device.
 *
 * Must be called while holding neither the device list mutex nor the device
 * replace semaphore, otherwise it will deadlock.
 */
void btrfs_reada_remove_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;

	/* Serialize with readahead extent creation at reada_find_extent(). */
	spin_lock(&fs_info->reada_lock);
	set_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
	spin_unlock(&fs_info->reada_lock);

	/*
	 * There might be readahead requests added to the radix trees which
	 * were not yet added to the readahead work queue. We need to start
	 * them and wait for their completion, otherwise we can end up with
	 * use-after-free problems when dropping the last reference on the
	 * readahead extents and their zones, as they need to access the
	 * device structure.
	 */
	reada_start_machine(fs_info);
	btrfs_flush_workqueue(fs_info->readahead_workers);
}

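/*
 * A sketch of the intended call order in a removal path (illustrative;
 * do_remove_device() is a hypothetical stand-in for the actual removal
 * work, not a function in this file):
 *
 *	btrfs_reada_remove_dev(dev);
 *	ret = do_remove_device(dev);
 *	if (ret)
 *		btrfs_reada_undo_remove_dev(dev);
 */
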
/*
 * If an error happens while removing a device (device replace or device
 * remove ioctls) after calling btrfs_reada_remove_dev(), call this to undo
 * what that function did. This is safe to call even if
 * btrfs_reada_remove_dev() was not called before.
 */
void btrfs_reada_undo_remove_dev(struct btrfs_device *dev)
{
	spin_lock(&dev->fs_info->reada_lock);
	clear_bit(BTRFS_DEV_STATE_NO_READA, &dev->dev_state);
	spin_unlock(&dev->fs_info->reada_lock);
}