// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

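/*
 * Illustrative only (not part of this file): the open path would typically
 * initialise the per-file readahead state roughly like the hypothetical
 * sketch below, pointing it at the file's mapping:
 *
 *	file_ra_state_init(&file->f_ra, file->f_mapping);
 *
 * After this, file->f_ra.ra_pages reflects the backing device's default
 * readahead window.
 */
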
/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, error return by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}
EXPORT_SYMBOL(read_cache_pages);

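/*
 * Illustrative only: a caller of read_cache_pages() supplies a @filler that
 * reads one page and returns 0 or an error. A hypothetical sketch, with
 * example_read_one_page() standing in for a real filesystem helper:
 *
 *	static int example_filler(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *
 *		return example_read_one_page(file, page);
 *	}
 *
 *	err = read_cache_pages(mapping, &page_list, example_filler, file);
 */
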
static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}

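/*
 * Illustrative only: an address_space_operations ->readahead()
 * implementation consumes the readahead_control by taking pages with
 * readahead_page() and starting I/O on them; anything it leaves behind is
 * unlocked and dropped by read_pages() above. A hypothetical sketch, with
 * example_start_read() standing in for the filesystem's submission helper:
 *
 *	static void example_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)))
 *			example_start_read(rac->file, page);
 *	}
 */
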
/**
 * page_cache_ra_unbounded - Start unchecked readahead.
 * @ractl: Readahead control.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_ra_unbounded(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct address_space *mapping = ractl->mapping;
	unsigned long index = readahead_index(ractl);
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	filemap_invalidate_lock_shared(mapping);
	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct page *page = xa_load(&mapping->i_pages, index + i);

		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		if (mapping->a_ops->readpages) {
			page->index = index + i;
			list_add(&page->lru, &page_pool);
		} else if (add_to_page_cache_lru(page, mapping, index + i,
					gfp_mask) < 0) {
			put_page(page);
			read_pages(ractl, &page_pool, true);
			i = ractl->_index + ractl->_nr_pages - index - 1;
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ractl->_nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(ractl, &page_pool, false);
	filemap_invalidate_unlock_shared(mapping);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_ra_unbounded);

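/*
 * Illustrative only: a filesystem that deliberately reads past i_size
 * (e.g. to fill cache granules) might drive this directly. A hypothetical
 * sketch, assuming the DEFINE_READAHEAD() helper from <linux/pagemap.h>:
 *
 *	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, index);
 *	page_cache_ra_unbounded(&ractl, nr_pages, 0);
 */
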
/*
 * do_page_cache_ra() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void do_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read, unsigned long lookahead_size)
{
	struct inode *inode = ractl->mapping->host;
	unsigned long index = readahead_index(ractl);
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_ra_unbounded(ractl, nr_to_read, lookahead_size);
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_ra(struct readahead_control *ractl,
		unsigned long nr_to_read)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages, index;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	index = readahead_index(ractl);
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min_t(unsigned long, nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		ractl->_index = index;
		do_page_cache_ra(ractl, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}

/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-8 page = 32k initial, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}

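/*
 * Worked example (reading the two helpers above as written, with a 32-page
 * / 128KiB maximum): a 4-page first read gets an 8-page (32KiB) initial
 * window from get_init_ra_size(); on subsequent sequential hits
 * get_next_ra_size() ramps the window 8 -> 16 -> 32 pages, after which it
 * stays clamped at the maximum.
 */
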
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial size
 * based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

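/*
 * Worked example of the window above: with start = 100, size = 32 and
 * async_size = 32, pages 100-131 are being read ahead and page 100
 * (start + size - async_size) carries PG_readahead. When the application
 * reaches that marked page, page_cache_async_ra() kicks off the next
 * window starting at page 132 while the current one is still being
 * consumed.
 */
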
/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 *	- length of the sequential read sequence, or
 *	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based readahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct readahead_control *ractl,
		bool hit_readahead_marker, unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(ractl->mapping->host);
	struct file_ra_state *ra = ractl->ra;
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	unsigned long index = readahead_index(ractl);
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals to
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(ractl->mapping, index + 1,
				max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history pages)
	 * that a sequential stream would leave behind.
	 */
	if (try_context_readahead(ractl->mapping, ra, index, req_size,
			max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	do_page_cache_ra(ractl, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulted next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ractl->_index = ra->start;
	do_page_cache_ra(ractl, ra->size, ra->async_size);
}

void page_cache_sync_ra(struct readahead_control *ractl,
		unsigned long req_count)
{
	bool do_forced_ra = ractl->file && (ractl->file->f_mode & FMODE_RANDOM);

	/*
	 * Even if readahead is disabled, issue this request as readahead
	 * as we'll need it to satisfy the requested range. The forced
	 * readahead will do the right thing and limit the read to just the
	 * requested range, which we'll set to 1 page for this case.
	 */
	if (!ractl->ra->ra_pages || blk_cgroup_congested()) {
		if (!ractl->file)
			return;
		req_count = 1;
		do_forced_ra = true;
	}

	/* be dumb */
	if (do_forced_ra) {
		force_page_cache_ra(ractl, req_count);
		return;
	}

	/* do readahead */
	ondemand_readahead(ractl, false, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_ra);

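/*
 * Illustrative only: callers that do not already hold a readahead_control
 * normally go through the page_cache_sync_readahead() wrapper in
 * <linux/pagemap.h>, which (roughly) builds one on the stack and calls the
 * function above:
 *
 *	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
 *	page_cache_sync_ra(&ractl, req_count);
 */
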
void page_cache_async_ra(struct readahead_control *ractl,
		struct page *page, unsigned long req_count)
{
	/* no readahead */
	if (!ractl->ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous readahead on IO congestion.
	 */
	if (inode_read_congested(ractl->mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do readahead */
	ondemand_readahead(ractl, true, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_ra);

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}

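/*
 * Illustrative only: from userspace this is reached via the readahead(2)
 * system call, e.g. (hypothetical snippet, error handling omitted):
 *
 *	int fd = open("datafile", O_RDONLY);
 *	readahead(fd, 0, 16 * 1024 * 1024);
 *
 * which asks the kernel to populate the page cache for that range of the
 * file, as with vfs_fadvise(POSIX_FADV_WILLNEED).
 */
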
/**
 * readahead_expand - Expand a readahead request
 * @ractl: The request to be expanded
 * @new_start: The revised start
 * @new_len: The revised size of the request
 *
 * Attempt to expand a readahead request outwards from the current size to the
 * specified size by inserting locked pages before and after the current window
 * to increase the size to the new window.  This may involve the insertion of
 * THPs, in which case the window may get expanded even beyond what was
 * requested.
 *
 * The algorithm will stop if it encounters a conflicting page already in the
 * pagecache and leave a smaller expansion than requested.
 *
 * The caller must check for this by examining the revised @ractl object for a
 * different expansion than was requested.
 */
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len)
{
	struct address_space *mapping = ractl->mapping;
	struct file_ra_state *ra = ractl->ra;
	pgoff_t new_index, new_nr_pages;
	gfp_t gfp_mask = readahead_gfp_mask(mapping);

	new_index = new_start / PAGE_SIZE;

	/* Expand the leading edge downwards */
	while (ractl->_index > new_index) {
		unsigned long index = ractl->_index - 1;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}

		ractl->_nr_pages++;
		ractl->_index = page->index;
	}

	new_len += new_start - readahead_pos(ractl);
	new_nr_pages = DIV_ROUND_UP(new_len, PAGE_SIZE);

	/* Expand the trailing edge upwards */
	while (ractl->_nr_pages < new_nr_pages) {
		unsigned long index = ractl->_index + ractl->_nr_pages;
		struct page *page = xa_load(&mapping->i_pages, index);

		if (page && !xa_is_value(page))
			return; /* Page apparently present */

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			return;
		if (add_to_page_cache_lru(page, mapping, index, gfp_mask) < 0) {
			put_page(page);
			return;
		}
		ractl->_nr_pages++;
		if (ra) {
			ra->size++;
			ra->async_size++;
		}
	}
}
EXPORT_SYMBOL(readahead_expand);

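/*
 * Illustrative only: a caching filesystem might use this to round the
 * request up to its block or granule size and then check how much of the
 * expansion actually happened (hypothetical sketch):
 *
 *	readahead_expand(ractl, round_down(start, block_size),
 *			 round_up(len, block_size));
 *	if (readahead_count(ractl) < wanted_pages)
 *		;	// conflicting page hit: fall back to a smaller read
 */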