/*
 * Copyright (C) 2011 STRATO.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. two parallel readaheads will normally finish
 * faster than the same two started one after another.
 */
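
/*
 * An illustrative usage sketch (compiled out; not part of this file):
 * prefetch an entire tree and wait for the readahead to finish. The helper
 * name and the chosen key bounds are assumptions for the example only; the
 * three calls are the interface described above.
 */
#if 0
static void example_reada_whole_tree(struct btrfs_root *root)
{
	struct btrfs_key key_start = { 0 };	/* smallest possible key */
	struct btrfs_key key_end = {		/* largest possible key */
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1,
	};
	struct reada_control *rc;

	rc = btrfs_reada_add(root, &key_start, &key_end);
	if (IS_ERR(rc))
		return;

	/* either wait for all enqueued reads to complete ... */
	btrfs_reada_wait(rc);

	/*
	 * ... or send the readahead to the background instead:
	 *	btrfs_reada_detach(rc);
	 * call exactly one of the two; each drops the handle's reference.
	 */
}
#endif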

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	int			err;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case, the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just cleanup our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. As a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

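		/*
		 * take a temporary ref on rc so that dropping the ref held
		 * for the element and waking the waiter below cannot free
		 * rc under us
		 */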
		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}

	return;
}

int btree_readahead_hook(struct btrfs_fs_info *fs_info,
			 struct extent_buffer *eb, int err)
{
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_fs_info *fs_info,
					  struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group_cache *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u32 blocksize;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	blocksize = fs_info->nodesize;
	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = blocksize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < blocksize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(fs_info, dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* insert extent in reada_tree + all per-device trees, all or nothing */
	btrfs_dev_replace_lock(&fs_info->dev_replace, 0);
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
		goto error;
	}
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	btrfs_dev_replace_unlock(&fs_info->dev_replace, 0);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 *
 * locks/unlocks the given zone on all peer devices holding a copy of it,
 * so reada_pick_zone() steers the other mirrors away while one side is
 * being read
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
				   struct btrfs_device *dev)
{
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;
}
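
/*
 * A sketch of what the FIXME in reada_start_machine_dev() hints at (compiled
 * out; not part of this file): batching several per-extent submissions under
 * a block-layer plug so contiguous reads can be merged before dispatch.
 * blk_start_plug()/blk_finish_plug() come from linux/blkdev.h, which is
 * already included above; the helper name and batch size are made up for
 * illustration.
 */
#if 0
static void example_plugged_batch(struct btrfs_fs_info *fs_info,
				  struct btrfs_device *dev)
{
	struct blk_plug plug;
	int batch = MAX_IN_FLIGHT;

	blk_start_plug(&plug);
	/* each call enqueues at most one extent read for this device */
	while (batch-- && reada_start_machine_dev(fs_info, dev))
		;
	blk_finish_plug(&plug);	/* submit the merged requests */
}
#endif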

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	struct btrfs_fs_info *fs_info;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);
	fs_info = rmw->fs_info;

	kfree(rmw);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&fs_info->reada_works_cnt);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(fs_info,
								    device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we break out of the loop above after 10000 iterations
	 * and hand the rest off to the workers, which distributes the load
	 * across the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, btrfs_readahead_helper,
			reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			    atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				 zone->start, zone->end, zone->elems,
				 zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				 re->logical, fs_info->nodesize,
				 list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			 re->logical, fs_info->nodesize,
			 list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}