// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include "ctree.h"
#include "volumes.h"
#include "disk-io.h"
#include "transaction.h"
#include "dev-replace.h"
#include "block-group.h"

#undef DEBUG

/*
 * This is the implementation for the generic read ahead framework.
 *
 * To trigger a readahead, btrfs_reada_add must be called. It will start
 * a read ahead for the given range [start, end) on tree root. The returned
 * handle can either be used to wait on the readahead to finish
 * (btrfs_reada_wait), or to send it to the background (btrfs_reada_detach).
 *
 * The read ahead works as follows:
 * On btrfs_reada_add, the root of the tree is inserted into a radix_tree.
 * reada_start_machine will then search for extents to prefetch and trigger
 * some reads. When a read finishes for a node, all contained node/leaf
 * pointers that lie in the given range will also be enqueued. The reads will
 * be triggered in sequential order, thus giving a big win over a naive
 * enumeration. It will also make use of multi-device layouts. Each disk
 * will have its own read pointer and all disks will be utilized in parallel.
 * Also, no two disks will read both sides of a mirror simultaneously, as this
 * would waste seeking capacity. Instead both disks will read different parts
 * of the filesystem.
 * Any number of readaheads can be started in parallel. The read order will be
 * determined globally, i.e. 2 parallel readaheads will normally finish faster
 * than the 2 started one after another.
 */
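
/*
 * A minimal usage sketch (illustrative only, not part of the original file):
 * a caller that is about to iterate a whole tree could prefetch it and then
 * either wait for the readahead or let it run in the background. The key
 * range below is an assumption for the sketch; real callers pick the range
 * they will actually visit.
 *
 *      struct reada_control *rc;
 *      struct btrfs_key start = { .objectid = 0, .type = 0, .offset = 0 };
 *      struct btrfs_key end = { .objectid = (u64)-1, .type = (u8)-1,
 *                               .offset = (u64)-1 };
 *
 *      rc = btrfs_reada_add(root, &start, &end);
 *      if (!IS_ERR(rc))
 *              btrfs_reada_wait(rc);   (or btrfs_reada_detach(rc))
 */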

#define MAX_IN_FLIGHT 6

struct reada_extctl {
	struct list_head	list;
	struct reada_control	*rc;
	u64			generation;
};

struct reada_extent {
	u64			logical;
	struct btrfs_key	top;
	struct list_head	extctl;
	int			refcnt;
	spinlock_t		lock;
	struct reada_zone	*zones[BTRFS_MAX_MIRRORS];
	int			nzones;
	int			scheduled;
};

struct reada_zone {
	u64			start;
	u64			end;
	u64			elems;
	struct list_head	list;
	spinlock_t		lock;
	int			locked;
	struct btrfs_device	*device;
	struct btrfs_device	*devs[BTRFS_MAX_MIRRORS]; /* full list, incl
							   * self */
	int			ndevs;
	struct kref		refcnt;
};

struct reada_machine_work {
	struct btrfs_work	work;
	struct btrfs_fs_info	*fs_info;
};

static void reada_extent_put(struct btrfs_fs_info *, struct reada_extent *);
static void reada_control_release(struct kref *kref);
static void reada_zone_release(struct kref *kref);
static void reada_start_machine(struct btrfs_fs_info *fs_info);
static void __reada_start_machine(struct btrfs_fs_info *fs_info);

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation);

/* recurses */
/* in case of err, eb might be NULL */
static void __readahead_hook(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re, struct extent_buffer *eb,
			     int err)
{
	int nritems;
	int i;
	u64 bytenr;
	u64 generation;
	struct list_head list;

	spin_lock(&re->lock);
	/*
	 * just take the full list from the extent. afterwards we
	 * don't need the lock anymore
	 */
	list_replace_init(&re->extctl, &list);
	re->scheduled = 0;
	spin_unlock(&re->lock);

	/*
	 * this is the error case, the extent buffer has not been
	 * read correctly. We won't access anything from it and
	 * just cleanup our data structures. Effectively this will
	 * cut the branch below this node from read ahead.
	 */
	if (err)
		goto cleanup;

	/*
	 * FIXME: currently we just set nritems to 0 if this is a leaf,
	 * effectively ignoring the content. In a next step we could
	 * trigger more readahead depending on the content, e.g.
	 * fetch the checksums for the extents in the leaf.
	 */
	if (!btrfs_header_level(eb))
		goto cleanup;

	nritems = btrfs_header_nritems(eb);
	generation = btrfs_header_generation(eb);
	for (i = 0; i < nritems; i++) {
		struct reada_extctl *rec;
		u64 n_gen;
		struct btrfs_key key;
		struct btrfs_key next_key;

		btrfs_node_key_to_cpu(eb, &key, i);
		if (i + 1 < nritems)
			btrfs_node_key_to_cpu(eb, &next_key, i + 1);
		else
			next_key = re->top;
		bytenr = btrfs_node_blockptr(eb, i);
		n_gen = btrfs_node_ptr_generation(eb, i);

		list_for_each_entry(rec, &list, list) {
			struct reada_control *rc = rec->rc;

			/*
			 * if the generation doesn't match, just ignore this
			 * extctl. This will probably cut off a branch from
			 * prefetch. Alternatively one could start a new (sub-)
			 * prefetch for this branch, starting again from root.
			 * FIXME: move the generation check out of this loop
			 */
#ifdef DEBUG
			if (rec->generation != generation) {
				btrfs_debug(fs_info,
					    "generation mismatch for (%llu,%d,%llu) %llu != %llu",
					    key.objectid, key.type, key.offset,
					    rec->generation, generation);
			}
#endif
			if (rec->generation == generation &&
			    btrfs_comp_cpu_keys(&key, &rc->key_end) < 0 &&
			    btrfs_comp_cpu_keys(&next_key, &rc->key_start) > 0)
				reada_add_block(rc, bytenr, &next_key, n_gen);
		}
	}

cleanup:
	/*
	 * free extctl records
	 */
	while (!list_empty(&list)) {
		struct reada_control *rc;
		struct reada_extctl *rec;

		rec = list_first_entry(&list, struct reada_extctl, list);
		list_del(&rec->list);
		rc = rec->rc;
		kfree(rec);

		kref_get(&rc->refcnt);
		if (atomic_dec_and_test(&rc->elems)) {
			kref_put(&rc->refcnt, reada_control_release);
			wake_up(&rc->wait);
		}
		kref_put(&rc->refcnt, reada_control_release);

		reada_extent_put(fs_info, re);	/* one ref for each entry */
	}

	return;
}

int btree_readahead_hook(struct extent_buffer *eb, int err)
{
	struct btrfs_fs_info *fs_info = eb->fs_info;
	int ret = 0;
	struct reada_extent *re;

	/* find extent */
	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree,
			       eb->start >> PAGE_SHIFT);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);
	if (!re) {
		ret = -1;
		goto start_machine;
	}

	__readahead_hook(fs_info, re, eb, err);
	reada_extent_put(fs_info, re);	/* our ref */

start_machine:
	reada_start_machine(fs_info);
	return ret;
}

static struct reada_zone *reada_find_zone(struct btrfs_device *dev, u64 logical,
					  struct btrfs_bio *bbio)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	int ret;
	struct reada_zone *zone;
	struct btrfs_block_group *cache = NULL;
	u64 start;
	u64 end;
	int i;

	zone = NULL;
	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
				     logical >> PAGE_SHIFT, 1);
	if (ret == 1 && logical >= zone->start && logical <= zone->end) {
		kref_get(&zone->refcnt);
		spin_unlock(&fs_info->reada_lock);
		return zone;
	}

	spin_unlock(&fs_info->reada_lock);

	cache = btrfs_lookup_block_group(fs_info, logical);
	if (!cache)
		return NULL;

	start = cache->start;
	end = start + cache->length - 1;
	btrfs_put_block_group(cache);

	zone = kzalloc(sizeof(*zone), GFP_KERNEL);
	if (!zone)
		return NULL;

	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		kfree(zone);
		return NULL;
	}

	zone->start = start;
	zone->end = end;
	INIT_LIST_HEAD(&zone->list);
	spin_lock_init(&zone->lock);
	zone->locked = 0;
	kref_init(&zone->refcnt);
	zone->elems = 0;
	zone->device = dev; /* our device always sits at index 0 */
	for (i = 0; i < bbio->num_stripes; ++i) {
		/* bounds have already been checked */
		zone->devs[i] = bbio->stripes[i].dev;
	}
	zone->ndevs = bbio->num_stripes;

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&dev->reada_zones,
				(unsigned long)(zone->end >> PAGE_SHIFT),
				zone);

	if (ret == -EEXIST) {
		kfree(zone);
		ret = radix_tree_gang_lookup(&dev->reada_zones, (void **)&zone,
					     logical >> PAGE_SHIFT, 1);
		if (ret == 1 && logical >= zone->start && logical <= zone->end)
			kref_get(&zone->refcnt);
		else
			zone = NULL;
	}
	spin_unlock(&fs_info->reada_lock);
	radix_tree_preload_end();

	return zone;
}

static struct reada_extent *reada_find_extent(struct btrfs_fs_info *fs_info,
					      u64 logical,
					      struct btrfs_key *top)
{
	int ret;
	struct reada_extent *re = NULL;
	struct reada_extent *re_exist = NULL;
	struct btrfs_bio *bbio = NULL;
	struct btrfs_device *dev;
	struct btrfs_device *prev_dev;
	u64 length;
	int real_stripes;
	int nzones = 0;
	unsigned long index = logical >> PAGE_SHIFT;
	int dev_replace_is_ongoing;
	int have_zone = 0;

	spin_lock(&fs_info->reada_lock);
	re = radix_tree_lookup(&fs_info->reada_tree, index);
	if (re)
		re->refcnt++;
	spin_unlock(&fs_info->reada_lock);

	if (re)
		return re;

	re = kzalloc(sizeof(*re), GFP_KERNEL);
	if (!re)
		return NULL;

	re->logical = logical;
	re->top = *top;
	INIT_LIST_HEAD(&re->extctl);
	spin_lock_init(&re->lock);
	re->refcnt = 1;

	/*
	 * map block
	 */
	length = fs_info->nodesize;
	ret = btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS, logical,
			      &length, &bbio, 0);
	if (ret || !bbio || length < fs_info->nodesize)
		goto error;

	if (bbio->num_stripes > BTRFS_MAX_MIRRORS) {
		btrfs_err(fs_info,
			  "readahead: more than %d copies not supported",
			  BTRFS_MAX_MIRRORS);
		goto error;
	}

	real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
	for (nzones = 0; nzones < real_stripes; ++nzones) {
		struct reada_zone *zone;

		dev = bbio->stripes[nzones].dev;

		/* cannot read ahead on missing device. */
		if (!dev->bdev)
			continue;

		zone = reada_find_zone(dev, logical, bbio);
		if (!zone)
			continue;

		re->zones[re->nzones++] = zone;
		spin_lock(&zone->lock);
		if (!zone->elems)
			kref_get(&zone->refcnt);
		++zone->elems;
		spin_unlock(&zone->lock);
		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	if (re->nzones == 0) {
		/* not a single zone found, error and out */
		goto error;
	}

	/* Insert extent in reada tree + all per-device trees, all or nothing */
	down_read(&fs_info->dev_replace.rwsem);
	ret = radix_tree_preload(GFP_KERNEL);
	if (ret) {
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}

	spin_lock(&fs_info->reada_lock);
	ret = radix_tree_insert(&fs_info->reada_tree, index, re);
	if (ret == -EEXIST) {
		re_exist = radix_tree_lookup(&fs_info->reada_tree, index);
		re_exist->refcnt++;
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	if (ret) {
		spin_unlock(&fs_info->reada_lock);
		radix_tree_preload_end();
		up_read(&fs_info->dev_replace.rwsem);
		goto error;
	}
	radix_tree_preload_end();
	prev_dev = NULL;
	dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(
			&fs_info->dev_replace);
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		dev = re->zones[nzones]->device;

		if (dev == prev_dev) {
			/*
			 * in case of DUP, just add the first zone. As both
			 * are on the same device, there's nothing to gain
			 * from adding both.
			 * Also, it wouldn't work, as the tree is per device
			 * and adding would fail with EEXIST
			 */
			continue;
		}
		if (!dev->bdev)
			continue;

		if (dev_replace_is_ongoing &&
		    dev == fs_info->dev_replace.tgtdev) {
			/*
			 * as this device is selected for reading only as
			 * a last resort, skip it for read ahead.
			 */
			continue;
		}
		prev_dev = dev;
		ret = radix_tree_insert(&dev->reada_extents, index, re);
		if (ret) {
			while (--nzones >= 0) {
				dev = re->zones[nzones]->device;
				BUG_ON(dev == NULL);
				/* ignore whether the entry was inserted */
				radix_tree_delete(&dev->reada_extents, index);
			}
			radix_tree_delete(&fs_info->reada_tree, index);
			spin_unlock(&fs_info->reada_lock);
			up_read(&fs_info->dev_replace.rwsem);
			goto error;
		}
		have_zone = 1;
	}
	spin_unlock(&fs_info->reada_lock);
	up_read(&fs_info->dev_replace.rwsem);

	if (!have_zone)
		goto error;

	btrfs_put_bbio(bbio);
	return re;

error:
	for (nzones = 0; nzones < re->nzones; ++nzones) {
		struct reada_zone *zone;

		zone = re->zones[nzones];
		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/*
			 * no fs_info->reada_lock needed, as this can't be
			 * the last ref
			 */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}
	btrfs_put_bbio(bbio);
	kfree(re);
	return re_exist;
}

static void reada_extent_put(struct btrfs_fs_info *fs_info,
			     struct reada_extent *re)
{
	int i;
	unsigned long index = re->logical >> PAGE_SHIFT;

	spin_lock(&fs_info->reada_lock);
	if (--re->refcnt) {
		spin_unlock(&fs_info->reada_lock);
		return;
	}

	radix_tree_delete(&fs_info->reada_tree, index);
	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		radix_tree_delete(&zone->device->reada_extents, index);
	}

	spin_unlock(&fs_info->reada_lock);

	for (i = 0; i < re->nzones; ++i) {
		struct reada_zone *zone = re->zones[i];

		kref_get(&zone->refcnt);
		spin_lock(&zone->lock);
		--zone->elems;
		if (zone->elems == 0) {
			/* no fs_info->reada_lock needed, as this can't be
			 * the last ref */
			kref_put(&zone->refcnt, reada_zone_release);
		}
		spin_unlock(&zone->lock);

		spin_lock(&fs_info->reada_lock);
		kref_put(&zone->refcnt, reada_zone_release);
		spin_unlock(&fs_info->reada_lock);
	}

	kfree(re);
}

static void reada_zone_release(struct kref *kref)
{
	struct reada_zone *zone = container_of(kref, struct reada_zone, refcnt);

	radix_tree_delete(&zone->device->reada_zones,
			  zone->end >> PAGE_SHIFT);

	kfree(zone);
}

static void reada_control_release(struct kref *kref)
{
	struct reada_control *rc = container_of(kref, struct reada_control,
						refcnt);

	kfree(rc);
}

static int reada_add_block(struct reada_control *rc, u64 logical,
			   struct btrfs_key *top, u64 generation)
{
	struct btrfs_fs_info *fs_info = rc->fs_info;
	struct reada_extent *re;
	struct reada_extctl *rec;

	/* takes one ref */
	re = reada_find_extent(fs_info, logical, top);
	if (!re)
		return -1;

	rec = kzalloc(sizeof(*rec), GFP_KERNEL);
	if (!rec) {
		reada_extent_put(fs_info, re);
		return -ENOMEM;
	}

	rec->rc = rc;
	rec->generation = generation;
	atomic_inc(&rc->elems);

	spin_lock(&re->lock);
	list_add_tail(&rec->list, &re->extctl);
	spin_unlock(&re->lock);

	/* leave the ref on the extent */

	return 0;
}

/*
 * called with fs_info->reada_lock held
 */
static void reada_peer_zones_set_lock(struct reada_zone *zone, int lock)
{
	int i;
	unsigned long index = zone->end >> PAGE_SHIFT;

	for (i = 0; i < zone->ndevs; ++i) {
		struct reada_zone *peer;
		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
		if (peer && peer->device != zone->device)
			peer->locked = lock;
	}
}

/*
 * called with fs_info->reada_lock held
 */
static int reada_pick_zone(struct btrfs_device *dev)
{
	struct reada_zone *top_zone = NULL;
	struct reada_zone *top_locked_zone = NULL;
	u64 top_elems = 0;
	u64 top_locked_elems = 0;
	unsigned long index = 0;
	int ret;

	if (dev->reada_curr_zone) {
		reada_peer_zones_set_lock(dev->reada_curr_zone, 0);
		kref_put(&dev->reada_curr_zone->refcnt, reada_zone_release);
		dev->reada_curr_zone = NULL;
	}
	/* pick the zone with the most elements */
	while (1) {
		struct reada_zone *zone;

		ret = radix_tree_gang_lookup(&dev->reada_zones,
					     (void **)&zone, index, 1);
		if (ret == 0)
			break;
		index = (zone->end >> PAGE_SHIFT) + 1;
		if (zone->locked) {
			if (zone->elems > top_locked_elems) {
				top_locked_elems = zone->elems;
				top_locked_zone = zone;
			}
		} else {
			if (zone->elems > top_elems) {
				top_elems = zone->elems;
				top_zone = zone;
			}
		}
	}
	if (top_zone)
		dev->reada_curr_zone = top_zone;
	else if (top_locked_zone)
		dev->reada_curr_zone = top_locked_zone;
	else
		return 0;

	dev->reada_next = dev->reada_curr_zone->start;
	kref_get(&dev->reada_curr_zone->refcnt);
	reada_peer_zones_set_lock(dev->reada_curr_zone, 1);

	return 1;
}

static int reada_tree_block_flagged(struct btrfs_fs_info *fs_info, u64 bytenr,
				    int mirror_num, struct extent_buffer **eb)
{
	struct extent_buffer *buf = NULL;
	int ret;

	buf = btrfs_find_create_tree_block(fs_info, bytenr);
	if (IS_ERR(buf))
		return 0;

	set_bit(EXTENT_BUFFER_READAHEAD, &buf->bflags);

	ret = read_extent_buffer_pages(buf, WAIT_PAGE_LOCK, mirror_num);
	if (ret) {
		free_extent_buffer_stale(buf);
		return ret;
	}

	if (test_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags)) {
		free_extent_buffer_stale(buf);
		return -EIO;
	} else if (extent_buffer_uptodate(buf)) {
		*eb = buf;
	} else {
		free_extent_buffer(buf);
	}
	return 0;
}

static int reada_start_machine_dev(struct btrfs_device *dev)
{
	struct btrfs_fs_info *fs_info = dev->fs_info;
	struct reada_extent *re = NULL;
	int mirror_num = 0;
	struct extent_buffer *eb = NULL;
	u64 logical;
	int ret;
	int i;

	spin_lock(&fs_info->reada_lock);
	if (dev->reada_curr_zone == NULL) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
	}
	/*
	 * FIXME currently we issue the reads one extent at a time. If we have
	 * a contiguous block of extents, we could also coalesce them or use
	 * plugging to speed things up
	 */
	ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
				     dev->reada_next >> PAGE_SHIFT, 1);
	if (ret == 0 || re->logical > dev->reada_curr_zone->end) {
		ret = reada_pick_zone(dev);
		if (!ret) {
			spin_unlock(&fs_info->reada_lock);
			return 0;
		}
		re = NULL;
		ret = radix_tree_gang_lookup(&dev->reada_extents, (void **)&re,
					     dev->reada_next >> PAGE_SHIFT, 1);
	}
	if (ret == 0) {
		spin_unlock(&fs_info->reada_lock);
		return 0;
	}
	dev->reada_next = re->logical + fs_info->nodesize;
	re->refcnt++;

	spin_unlock(&fs_info->reada_lock);

	spin_lock(&re->lock);
	if (re->scheduled || list_empty(&re->extctl)) {
		spin_unlock(&re->lock);
		reada_extent_put(fs_info, re);
		return 0;
	}
	re->scheduled = 1;
	spin_unlock(&re->lock);

	/*
	 * find mirror num
	 */
	for (i = 0; i < re->nzones; ++i) {
		if (re->zones[i]->device == dev) {
			mirror_num = i + 1;
			break;
		}
	}
	logical = re->logical;

	atomic_inc(&dev->reada_in_flight);
	ret = reada_tree_block_flagged(fs_info, logical, mirror_num, &eb);
	if (ret)
		__readahead_hook(fs_info, re, NULL, ret);
	else if (eb)
		__readahead_hook(fs_info, re, eb, ret);

	if (eb)
		free_extent_buffer(eb);

	atomic_dec(&dev->reada_in_flight);
	reada_extent_put(fs_info, re);

	return 1;

}

static void reada_start_machine_worker(struct btrfs_work *work)
{
	struct reada_machine_work *rmw;
	int old_ioprio;

	rmw = container_of(work, struct reada_machine_work, work);

	old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
				       task_nice_ioprio(current));
	set_task_ioprio(current, BTRFS_IOPRIO_READA);
	__reada_start_machine(rmw->fs_info);
	set_task_ioprio(current, old_ioprio);

	atomic_dec(&rmw->fs_info->reada_works_cnt);

	kfree(rmw);
}

static void __reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	u64 enqueued;
	u64 total = 0;
	int i;

again:
	do {
		enqueued = 0;
		mutex_lock(&fs_devices->device_list_mutex);
		list_for_each_entry(device, &fs_devices->devices, dev_list) {
			if (atomic_read(&device->reada_in_flight) <
			    MAX_IN_FLIGHT)
				enqueued += reada_start_machine_dev(device);
		}
		mutex_unlock(&fs_devices->device_list_mutex);
		total += enqueued;
	} while (enqueued && total < 10000);
	if (fs_devices->seed) {
		fs_devices = fs_devices->seed;
		goto again;
	}

	if (enqueued == 0)
		return;

	/*
	 * If everything is already in the cache, this is effectively single
	 * threaded. To a) not hold the caller for too long and b) to utilize
	 * more cores, we broke the loop above after 10000 iterations and now
	 * enqueue to workers to finish it. This will distribute the load to
	 * the cores.
	 */
	for (i = 0; i < 2; ++i) {
		reada_start_machine(fs_info);
		if (atomic_read(&fs_info->reada_works_cnt) >
		    BTRFS_MAX_MIRRORS * 2)
			break;
	}
}

static void reada_start_machine(struct btrfs_fs_info *fs_info)
{
	struct reada_machine_work *rmw;

	rmw = kzalloc(sizeof(*rmw), GFP_KERNEL);
	if (!rmw) {
		/* FIXME we cannot handle this properly right now */
		BUG();
	}
	btrfs_init_work(&rmw->work, reada_start_machine_worker, NULL, NULL);
	rmw->fs_info = fs_info;

	btrfs_queue_work(fs_info->readahead_workers, &rmw->work);
	atomic_inc(&fs_info->reada_works_cnt);
}

#ifdef DEBUG
static void dump_devs(struct btrfs_fs_info *fs_info, int all)
{
	struct btrfs_device *device;
	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
	unsigned long index;
	int ret;
	int i;
	int j;
	int cnt;

	spin_lock(&fs_info->reada_lock);
	list_for_each_entry(device, &fs_devices->devices, dev_list) {
		btrfs_debug(fs_info, "dev %lld has %d in flight", device->devid,
			atomic_read(&device->reada_in_flight));
		index = 0;
		while (1) {
			struct reada_zone *zone;
			ret = radix_tree_gang_lookup(&device->reada_zones,
						     (void **)&zone, index, 1);
			if (ret == 0)
				break;
			pr_debug("  zone %llu-%llu elems %llu locked %d devs",
				zone->start, zone->end, zone->elems,
				zone->locked);
			for (j = 0; j < zone->ndevs; ++j) {
				pr_cont(" %lld",
					zone->devs[j]->devid);
			}
			if (device->reada_curr_zone == zone)
				pr_cont(" curr off %llu",
					device->reada_next - zone->start);
			pr_cont("\n");
			index = (zone->end >> PAGE_SHIFT) + 1;
		}
		cnt = 0;
		index = 0;
		while (all) {
			struct reada_extent *re = NULL;

			ret = radix_tree_gang_lookup(&device->reada_extents,
						     (void **)&re, index, 1);
			if (ret == 0)
				break;
			pr_debug("  re: logical %llu size %u empty %d scheduled %d",
				re->logical, fs_info->nodesize,
				list_empty(&re->extctl), re->scheduled);

			for (i = 0; i < re->nzones; ++i) {
				pr_cont(" zone %llu-%llu devs",
					re->zones[i]->start,
					re->zones[i]->end);
				for (j = 0; j < re->zones[i]->ndevs; ++j) {
					pr_cont(" %lld",
						re->zones[i]->devs[j]->devid);
				}
			}
			pr_cont("\n");
			index = (re->logical >> PAGE_SHIFT) + 1;
			if (++cnt > 15)
				break;
		}
	}

	index = 0;
	cnt = 0;
	while (all) {
		struct reada_extent *re = NULL;

		ret = radix_tree_gang_lookup(&fs_info->reada_tree, (void **)&re,
					     index, 1);
		if (ret == 0)
			break;
		if (!re->scheduled) {
			index = (re->logical >> PAGE_SHIFT) + 1;
			continue;
		}
		pr_debug("re: logical %llu size %u list empty %d scheduled %d",
			re->logical, fs_info->nodesize,
			list_empty(&re->extctl), re->scheduled);
		for (i = 0; i < re->nzones; ++i) {
			pr_cont(" zone %llu-%llu devs",
				re->zones[i]->start,
				re->zones[i]->end);
			for (j = 0; j < re->zones[i]->ndevs; ++j) {
				pr_cont(" %lld",
					re->zones[i]->devs[j]->devid);
			}
		}
		pr_cont("\n");
		index = (re->logical >> PAGE_SHIFT) + 1;
	}
	spin_unlock(&fs_info->reada_lock);
}
#endif

/*
 * interface
 */
struct reada_control *btrfs_reada_add(struct btrfs_root *root,
			struct btrfs_key *key_start, struct btrfs_key *key_end)
{
	struct reada_control *rc;
	u64 start;
	u64 generation;
	int ret;
	struct extent_buffer *node;
	static struct btrfs_key max_key = {
		.objectid = (u64)-1,
		.type = (u8)-1,
		.offset = (u64)-1
	};

	rc = kzalloc(sizeof(*rc), GFP_KERNEL);
	if (!rc)
		return ERR_PTR(-ENOMEM);

	rc->fs_info = root->fs_info;
	rc->key_start = *key_start;
	rc->key_end = *key_end;
	atomic_set(&rc->elems, 0);
	init_waitqueue_head(&rc->wait);
	kref_init(&rc->refcnt);
	kref_get(&rc->refcnt); /* one ref for having elements */

	node = btrfs_root_node(root);
	start = node->start;
	generation = btrfs_header_generation(node);
	free_extent_buffer(node);

	ret = reada_add_block(rc, start, &max_key, generation);
	if (ret) {
		kfree(rc);
		return ERR_PTR(ret);
	}

	reada_start_machine(root->fs_info);

	return rc;
}

#ifdef DEBUG
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   5 * HZ);
		dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);
	}

	dump_devs(fs_info, atomic_read(&rc->elems) < 10 ? 1 : 0);

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#else
int btrfs_reada_wait(void *handle)
{
	struct reada_control *rc = handle;
	struct btrfs_fs_info *fs_info = rc->fs_info;

	while (atomic_read(&rc->elems)) {
		if (!atomic_read(&fs_info->reada_works_cnt))
			reada_start_machine(fs_info);
		wait_event_timeout(rc->wait, atomic_read(&rc->elems) == 0,
				   (HZ + 9) / 10);
	}

	kref_put(&rc->refcnt, reada_control_release);

	return 0;
}
#endif

void btrfs_reada_detach(void *handle)
{
	struct reada_control *rc = handle;

	kref_put(&rc->refcnt, reada_control_release);
}