// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
#include <linux/fadvise.h>
#include <linux/sched/mm.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0, PAGE_SIZE);
		page->mapping = NULL;
		unlock_page(page);
	}
	put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = lru_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 *
 * Returns: %0 on success, error return by @filler otherwise
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = lru_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping, page->index,
				readahead_gfp_mask(mapping))) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		put_page(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
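
/*
 * Illustrative sketch only (not part of the original file): a filesystem
 * could build its ->readpages() on read_cache_pages(), handing each page
 * to a filler callback.  The myfs_* names below are hypothetical.
 *
 *	static int myfs_fill_page(void *data, struct page *page)
 *	{
 *		struct file *file = data;
 *
 *		return myfs_readpage(file, page);
 *	}
 *
 *	static int myfs_readpages(struct file *file,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages, myfs_fill_page, file);
 *	}
 */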

/*
 * Submit readahead I/O for the pages described by @rac (and, on the legacy
 * ->readpages() path, the pages on @pages), preferring ->readahead(), then
 * ->readpages(), then per-page ->readpage(), all under one block plug.
 * If @skip_page is true, the caller is stepping over an already-cached
 * page, so rac->_index is advanced past it afterwards.
 */
static void read_pages(struct readahead_control *rac, struct list_head *pages,
		bool skip_page)
{
	const struct address_space_operations *aops = rac->mapping->a_ops;
	struct page *page;
	struct blk_plug plug;

	if (!readahead_count(rac))
		goto out;

	blk_start_plug(&plug);

	if (aops->readahead) {
		aops->readahead(rac);
		/* Clean up the remaining pages */
		while ((page = readahead_page(rac))) {
			unlock_page(page);
			put_page(page);
		}
	} else if (aops->readpages) {
		aops->readpages(rac->file, rac->mapping, pages,
				readahead_count(rac));
		/* Clean up the remaining pages */
		put_pages_list(pages);
		rac->_index += rac->_nr_pages;
		rac->_nr_pages = 0;
	} else {
		while ((page = readahead_page(rac))) {
			aops->readpage(rac->file, page);
			put_page(page);
		}
	}

	blk_finish_plug(&plug);

	BUG_ON(!list_empty(pages));
	BUG_ON(readahead_count(rac));

out:
	if (skip_page)
		rac->_index++;
}

/**
 * page_cache_readahead_unbounded - Start unchecked readahead.
 * @mapping: File address space.
 * @file: This instance of the open file; used for authentication.
 * @index: First page index to read.
 * @nr_to_read: The number of pages to read.
 * @lookahead_size: Where to start the next readahead.
 *
 * This function is for filesystems to call when they want to start
 * readahead beyond a file's stated i_size.  This is almost certainly
 * not the function you want to call.  Use page_cache_async_readahead()
 * or page_cache_sync_readahead() instead.
 *
 * Context: File is referenced by caller.  Mutexes may be held by caller.
 * May sleep, but will not reenter filesystem to reclaim memory.
 */
void page_cache_readahead_unbounded(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	LIST_HEAD(page_pool);
	gfp_t gfp_mask = readahead_gfp_mask(mapping);
	struct readahead_control rac = {
		.mapping = mapping,
		.file = file,
		._index = index,
	};
	unsigned long i;

	/*
	 * Partway through the readahead operation, we will have added
	 * locked pages to the page cache, but will not yet have submitted
	 * them for I/O.  Adding another page may need to allocate memory,
	 * which can trigger memory reclaim.  Telling the VM we're in
	 * the middle of a filesystem operation will cause it to not
	 * touch file-backed pages, preventing a deadlock.  Most (all?)
	 * filesystems already specify __GFP_NOFS in their mapping's
	 * gfp_mask, but let's be explicit here.
	 */
	unsigned int nofs = memalloc_nofs_save();

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (i = 0; i < nr_to_read; i++) {
		struct page *page = xa_load(&mapping->i_pages, index + i);

		BUG_ON(index + i != rac._index + rac._nr_pages);

		if (page && !xa_is_value(page)) {
			/*
			 * Page already present?  Kick off the current batch
			 * of contiguous pages before continuing with the
			 * next batch.  This page may be the one we would
			 * have intended to mark as Readahead, but we don't
			 * have a stable reference to this page, and it's
			 * not worth getting one just for that.
			 */
			read_pages(&rac, &page_pool, true);
			continue;
		}

		page = __page_cache_alloc(gfp_mask);
		if (!page)
			break;
		if (mapping->a_ops->readpages) {
			page->index = index + i;
			list_add(&page->lru, &page_pool);
		} else if (add_to_page_cache_lru(page, mapping, index + i,
					gfp_mask) < 0) {
			put_page(page);
			read_pages(&rac, &page_pool, true);
			continue;
		}
		if (i == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		rac._nr_pages++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	read_pages(&rac, &page_pool, false);
	memalloc_nofs_restore(nofs);
}
EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
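
/*
 * Illustrative worked example (not from the original source): with
 * index = 0, nr_to_read = 16 and lookahead_size = 4, the loop above
 * allocates pages 0-15 and marks page 12 (i == 16 - 4) with PG_readahead,
 * so the next asynchronous readahead is kicked off while four pages of
 * already-submitted I/O remain to be consumed.
 */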

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 */
void __do_page_cache_readahead(struct address_space *mapping,
		struct file *file, pgoff_t index, unsigned long nr_to_read,
		unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	loff_t isize = i_size_read(inode);
	pgoff_t end_index;	/* The last page we want to read */

	if (isize == 0)
		return;

	end_index = (isize - 1) >> PAGE_SHIFT;
	if (index > end_index)
		return;
	/* Don't read past the page containing the last byte of the file */
	if (nr_to_read > end_index - index)
		nr_to_read = end_index - index + 1;

	page_cache_readahead_unbounded(mapping, file, index, nr_to_read,
			lookahead_size);
}
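
/*
 * Illustrative worked example (not from the original source): for a
 * 10000-byte file with 4KiB pages, end_index = (10000 - 1) >> 12 = 2.
 * A request for 8 pages starting at index 1 exceeds end_index - index = 1,
 * so nr_to_read is clamped to end_index - index + 1 = 2, stopping at the
 * page that holds EOF.
 */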

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
void force_page_cache_readahead(struct address_space *mapping,
		struct file *filp, pgoff_t index, unsigned long nr_to_read)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	struct file_ra_state *ra = &filp->f_ra;
	unsigned long max_pages;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages &&
			!mapping->a_ops->readahead))
		return;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
	nr_to_read = min(nr_to_read, max_pages);
	while (nr_to_read) {
		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		__do_page_cache_readahead(mapping, filp, index, this_chunk, 0);

		index += this_chunk;
		nr_to_read -= this_chunk;
	}
}
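
/*
 * Illustrative worked example (not from the original source): with 4KiB
 * pages each chunk is (2 * 1024 * 1024) / PAGE_SIZE = 512 pages, so a
 * 1200-page forced read (assuming max_pages permits a request that large)
 * is issued as batches of 512 + 512 + 176 pages instead of pinning all
 * 1200 pages at once.
 */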

/*
 * Set the initial window size, rounding the request up to the next power
 * of 2: x 4 for a small request, x 2 for a medium one, and capped at the
 * max readahead size for anything larger.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}
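
/*
 * Illustrative worked examples (not from the original source), assuming a
 * 32-page (128KiB with 4KiB pages) max:
 *
 *	get_init_ra_size(1, 32)  -> rounds to 1,  1 <= 32/32, * 4 ->  4
 *	get_init_ra_size(5, 32)  -> rounds to 8,  8 <= 32/4,  * 2 -> 16
 *	get_init_ra_size(17, 32) -> rounds to 32, 32 > 32/4,  cap -> 32
 */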

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
				      unsigned long max)
{
	unsigned long cur = ra->size;

	if (cur < max / 16)
		return 4 * cur;
	if (cur <= max / 2)
		return 2 * cur;
	return max;
}
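
/*
 * Illustrative worked example (not from the original source), with
 * max = 32 pages: a 1-page window grows to 4 (cur < max / 16), an 8-page
 * window doubles to 16 (cur <= max / 2), and a 20-page window jumps
 * straight to the 32-page maximum.
 */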

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows
 * down as it approaches max_readahead.
 */

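/*
 * Illustrative instance of the diagram above (not from the original
 * source): with start = 100, size = 16 and async_size = 8, pages 100-115
 * form the window and page 100 + 16 - 8 = 108 carries PG_readahead; when
 * the application reaches page 108, the next window is submitted
 * asynchronously while pages 108-115 still satisfy reads.
 */
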
/*
 * Count contiguously cached pages from @index-1 to @index-@max,
 * this count is a conservative estimation of
 * 	- length of the sequential read sequence, or
 * 	- thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
				   pgoff_t index, unsigned long max)
{
	pgoff_t head;

	rcu_read_lock();
	head = page_cache_prev_miss(mapping, index - 1, max);
	rcu_read_unlock();

	return index - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
				 struct file_ra_state *ra,
				 pgoff_t index,
				 unsigned long req_size,
				 unsigned long max)
{
	pgoff_t size;

	size = count_history_pages(mapping, index, max);

	/*
	 * not enough history pages:
	 * it could be a random read
	 */
	if (size <= req_size)
		return 0;

	/*
	 * starts from beginning of file:
	 * it is a strong indication of long-run stream (or whole-file-read)
	 */
	if (size >= index)
		size *= 2;

	ra->start = index;
	ra->size = min(size + req_size, max);
	ra->async_size = 1;

	return 1;
}
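
/*
 * Illustrative worked example (not from the original source): a read of
 * req_size = 8 at index = 100 with pages 90-99 already cached gives
 * count_history_pages() = 10.  Since 10 > 8 this looks like a sequential
 * stream, so with max = 32 the window becomes start = 100,
 * size = min(10 + 8, 32) = 18, async_size = 1.
 */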

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static void ondemand_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *filp,
		bool hit_readahead_marker, pgoff_t index,
		unsigned long req_size)
{
	struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
	unsigned long max_pages = ra->ra_pages;
	unsigned long add_pages;
	pgoff_t prev_index;

	/*
	 * If the request exceeds the readahead window, allow the read to
	 * be up to the optimal hardware IO size
	 */
	if (req_size > max_pages && bdi->io_pages > max_pages)
		max_pages = min(req_size, bdi->io_pages);

	/*
	 * start of file
	 */
	if (!index)
		goto initial_readahead;

	/*
	 * It's the expected callback index, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if ((index == (ra->start + ra->size - ra->async_size) ||
	     index == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size. Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = page_cache_next_miss(mapping, index + 1, max_pages);
		rcu_read_unlock();

		if (!start || start - index > max_pages)
			return;

		ra->start = start;
		ra->size = start - index;	/* old async_size */
		ra->size += req_size;
		ra->size = get_next_ra_size(ra, max_pages);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * oversize read
	 */
	if (req_size > max_pages)
		goto initial_readahead;

	/*
	 * sequential cache miss
	 * trivial case: (index - prev_index) == 1
	 * unaligned reads: (index - prev_index) == 0
	 */
	prev_index = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
	if (index - prev_index <= 1UL)
		goto initial_readahead;

	/*
	 * Query the page cache and look for the traces (cached history
	 * pages) that a sequential stream would leave behind.
	 */
	if (try_context_readahead(mapping, ra, index, req_size, max_pages))
		goto readit;

	/*
	 * standalone, small random read
	 * Read as is, and do not pollute the readahead state.
	 */
	__do_page_cache_readahead(mapping, filp, index, req_size, 0);
	return;

initial_readahead:
	ra->start = index;
	ra->size = get_init_ra_size(req_size, max_pages);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	/*
	 * Will this read hit the readahead marker made by itself?
	 * If so, trigger the readahead marker hit now, and merge
	 * the resulting next readahead window into the current one.
	 * Take care of maximum IO pages as above.
	 */
	if (index == ra->start && ra->size == ra->async_size) {
		add_pages = get_next_ra_size(ra, max_pages);
		if (ra->size + add_pages <= max_pages) {
			ra->async_size = add_pages;
			ra->size += add_pages;
		} else {
			ra->size = max_pages;
			ra->async_size = max_pages >> 1;
		}
	}

	ra_submit(ra, mapping, filp);
}
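
/*
 * Illustrative trace (not from the original source), with max_pages = 32:
 * a first 4-page read at index 0 takes the initial_readahead path:
 * start = 0, size = get_init_ra_size(4, 32) = 8, async_size = 8 - 4 = 4,
 * so page 4 is marked PG_readahead.  When the application later hits that
 * marker, index == start + size - async_size (4 == 0 + 8 - 4) matches the
 * sequential test above and the window advances: start = 8,
 * size = get_next_ra_size(ra, 32) = 16, async_size = 16.
 */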

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss has
 * occurred: it will submit the read.  The readahead logic may decide to
 * piggyback more pages onto the read request if access patterns suggest
 * it will improve performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t index, unsigned long req_count)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	if (blk_cgroup_congested())
		return;

	/* be dumb */
	if (filp && (filp->f_mode & FMODE_RANDOM)) {
		force_page_cache_readahead(mapping, filp, index, req_count);
		return;
	}

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, index, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);
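
/*
 * A condensed sketch (not verbatim) of the typical call site in
 * generic_file_buffered_read(), mm/filemap.c: on a page cache miss the
 * reader starts synchronous readahead and then repeats the lookup.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp, index,
 *				last_index - index);
 *		page = find_get_page(mapping, index);
 *	}
 */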

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t index,
			   unsigned long req_count)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (inode_read_congested(mapping->host))
		return;

	if (blk_cgroup_congested())
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, index, req_count);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
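
/*
 * A condensed sketch (not verbatim) of the companion call site in
 * generic_file_buffered_read(): when a present page still carries the
 * PG_readahead marker, more readahead is pipelined before copying out.
 *
 *	if (PageReadahead(page))
 *		page_cache_async_readahead(mapping, ra, filp, page,
 *				index, last_index - index);
 */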

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct fd f;

	ret = -EBADF;
	f = fdget(fd);
	if (!f.file || !(f.file->f_mode & FMODE_READ))
		goto out;

	/*
	 * The readahead() syscall is intended to run only on files
	 * that can execute readahead. If readahead is not possible
	 * on this file, then we must return -EINVAL.
	 */
	ret = -EINVAL;
	if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
	    !S_ISREG(file_inode(f.file)->i_mode))
		goto out;

	ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
out:
	fdput(f);
	return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
	return ksys_readahead(fd, offset, count);
}
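
/*
 * Illustrative userspace example (not from the original source): the
 * syscall above is reached through the readahead(2) wrapper, e.g.
 *
 *	// hint that the first 2MiB of fd will be read sequentially
 *	if (readahead(fd, 0, 2 * 1024 * 1024) != 0)
 *		perror("readahead");
 */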