/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    Andrew Morton
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/dax.h>
#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/mm_inline.h>

#include "internal.h"

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = inode_to_bdi(mapping->host)->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
                                             struct page *page)
{
        if (page_has_private(page)) {
                if (!trylock_page(page))
                        BUG();
                page->mapping = mapping;
                do_invalidatepage(page, 0, PAGE_SIZE);
                page->mapping = NULL;
                unlock_page(page);
        }
        put_page(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
                                              struct list_head *pages)
{
        struct page *victim;

        while (!list_empty(pages)) {
                victim = lru_to_page(pages);
                list_del(&victim->lru);
                read_cache_pages_invalidate_page(mapping, victim);
        }
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        int ret = 0;

        while (!list_empty(pages)) {
                page = lru_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache_lru(page, mapping, page->index,
                                readahead_gfp_mask(mapping))) {
                        read_cache_pages_invalidate_page(mapping, page);
                        continue;
                }
                put_page(page);

                ret = filler(data, page);
                if (unlikely(ret)) {
                        read_cache_pages_invalidate_pages(mapping, pages);
                        break;
                }
                task_io_account_read(PAGE_SIZE);
        }
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);

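/*
 * Illustrative sketch only (not part of this file): a filesystem that has
 * built a list of pages with ->index filled in can push them through its
 * own ->readpage() by passing the struct file as @data.  The names below
 * are hypothetical; NFS uses a comparable filler callback in fs/nfs/read.c.
 *
 *      static int example_filler(void *data, struct page *page)
 *      {
 *              struct file *file = data;
 *
 *              return file->f_mapping->a_ops->readpage(file, page);
 *      }
 *
 *      error = read_cache_pages(mapping, &page_list, example_filler, file);
 */
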
static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned int nr_pages, gfp_t gfp)
{
        struct blk_plug plug;
        unsigned page_idx;
        int ret;

        blk_start_plug(&plug);

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = lru_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache_lru(page, mapping, page->index, gfp))
                        mapping->a_ops->readpage(filp, page);
                put_page(page);
        }
        ret = 0;

out:
        blk_finish_plug(&plug);

        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * the pages first, then submits them for I/O. This avoids the very bad
 * behaviour which would occur if page allocations are causing VM writeback.
 * We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 */
unsigned int __do_page_cache_readahead(struct address_space *mapping,
                struct file *filp, pgoff_t offset, unsigned long nr_to_read,
                unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        unsigned int nr_pages = 0;
        loff_t isize = i_size_read(inode);
        gfp_t gfp_mask = readahead_gfp_mask(mapping);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                rcu_read_lock();
                page = radix_tree_lookup(&mapping->i_pages, page_offset);
                rcu_read_unlock();
                if (page && !radix_tree_exceptional_entry(page)) {
                        /*
                         * Page already present?  Kick off the current batch of
                         * contiguous pages before continuing with the next
                         * batch.
                         */
                        if (nr_pages)
                                read_pages(mapping, filp, &page_pool, nr_pages,
                                                gfp_mask);
                        nr_pages = 0;
                        continue;
                }

                page = __page_cache_alloc(gfp_mask);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                nr_pages++;
        }

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (nr_pages)
                read_pages(mapping, filp, &page_pool, nr_pages, gfp_mask);
        BUG_ON(!list_empty(&page_pool));
out:
        return nr_pages;
}

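/*
 * For example (an illustrative call, not made anywhere in this file):
 *
 *      __do_page_cache_readahead(mapping, filp, 0, 32, 16);
 *
 * allocates and reads pages 0..31 (skipping pages that are already cached,
 * and stopping early at EOF or on allocation failure) and sets PG_readahead
 * on the page at index 16, i.e. at page_idx == nr_to_read - lookahead_size,
 * so the reader trips the async readahead marker halfway through the window.
 */
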
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                               pgoff_t offset, unsigned long nr_to_read)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        struct file_ra_state *ra = &filp->f_ra;
        unsigned long max_pages;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        max_pages = max_t(unsigned long, bdi->io_pages, ra->ra_pages);
        nr_to_read = min(nr_to_read, max_pages);
        while (nr_to_read) {
                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                __do_page_cache_readahead(mapping, filp, offset, this_chunk, 0);

                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return 0;
}

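/*
 * Worked example (assuming 4K pages): this_chunk is 512 pages, so a forced
 * 3MB (768 page) readahead, when not already clamped by max_pages, is
 * submitted as two calls to __do_page_cache_readahead(), 512 pages followed
 * by 256 pages, rather than pinning all 768 pages at once.
 */
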
/*
 * Set the initial window size, round to next power of 2 and square
 * for small size, x 4 for medium, and x 2 for large
 * for 128k (32 page) max ra
 * 1-8 page = 32k initial, > 8 page = 128k initial
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

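/*
 * Worked examples with max = 32 pages (128k with 4K pages): a 2 page request
 * rounds to 2 and is doubled to 4; a 6 page request rounds up to 8 and is
 * doubled to 16; a 12 page request rounds up to 16, exceeds max/4, and is
 * clamped to the full 32 page window.
 */
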
/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                      unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}

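/*
 * Worked example with max = 128 pages: a 4 page window (below max/16 = 8)
 * quadruples to 16 pages, then doubles on each further sequential hit:
 * 16 -> 32 -> 64 -> 128, where it stays clamped at max.
 */
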
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window. Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state. So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as readahead
 * indicator. The flag won't be set on already cached pages, to avoid the
 * readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads. Note that the readahead algorithm checks loosely
 * for sequential patterns. Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on I/O request size and the max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

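/*
 * Concrete example of the pipelining described above: after ra_submit() of a
 * window with start = 100, size = 32 and async_size = 16, pages 100..131 are
 * read and the page at index 116 (start + size - async_size) carries
 * PG_readahead.  When the application reaches page 116,
 * page_cache_async_readahead() fires and the next window is submitted while
 * pages 116..131 are still being consumed.
 */
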
/*
 * Count contiguously cached pages from @offset-1 to @offset-@max,
 * this count is a conservative estimation of
 *      - length of the sequential read sequence, or
 *      - thrashing threshold in memory tight systems
 */
static pgoff_t count_history_pages(struct address_space *mapping,
                                   pgoff_t offset, unsigned long max)
{
        pgoff_t head;

        rcu_read_lock();
        head = page_cache_prev_hole(mapping, offset - 1, max);
        rcu_read_unlock();

        return offset - 1 - head;
}

/*
 * page cache context based read-ahead
 */
static int try_context_readahead(struct address_space *mapping,
                                 struct file_ra_state *ra,
                                 pgoff_t offset,
                                 unsigned long req_size,
                                 unsigned long max)
{
        pgoff_t size;

        size = count_history_pages(mapping, offset, max);

        /*
         * not enough history pages:
         * it could be a random read
         */
        if (size <= req_size)
                return 0;

        /*
         * starts from beginning of file:
         * it is a strong indication of long-run stream (or whole-file-read)
         */
        if (size >= offset)
                size *= 2;

        ra->start = offset;
        ra->size = min(size + req_size, max);
        ra->async_size = 1;

        return 1;
}

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        struct backing_dev_info *bdi = inode_to_bdi(mapping->host);
        unsigned long max_pages = ra->ra_pages;
        pgoff_t prev_offset;

        /*
         * If the request exceeds the readahead window, allow the read to
         * be up to the optimal hardware IO size
         */
        if (req_size > max_pages && bdi->io_pages > max_pages)
                max_pages = min(req_size, bdi->io_pages);

        /*
         * start of file
         */
        if (!offset)
                goto initial_readahead;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if ((offset == (ra->start + ra->size - ra->async_size) ||
             offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size. Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                rcu_read_lock();
                start = page_cache_next_hole(mapping, offset + 1, max_pages);
                rcu_read_unlock();

                if (!start || start - offset > max_pages)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size += req_size;
                ra->size = get_next_ra_size(ra, max_pages);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * oversize read
         */
        if (req_size > max_pages)
                goto initial_readahead;

        /*
         * sequential cache miss
         * trivial case: (offset - prev_offset) == 1
         * unaligned reads: (offset - prev_offset) == 0
         */
        prev_offset = (unsigned long long)ra->prev_pos >> PAGE_SHIFT;
        if (offset - prev_offset <= 1UL)
                goto initial_readahead;

        /*
         * Query the page cache and look for the traces (cached history pages)
         * that a sequential stream would leave behind.
         */
        if (try_context_readahead(mapping, ra, offset, req_size, max_pages))
                goto readit;

        /*
         * standalone, small random read
         * Read as is, and do not pollute the readahead state.
         */
        return __do_page_cache_readahead(mapping, filp, offset, req_size, 0);

initial_readahead:
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max_pages);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        /*
         * Will this read hit the readahead marker made by itself?
         * If so, trigger the readahead marker hit now, and merge
         * the resulting next readahead window into the current one.
         */
        if (offset == ra->start && ra->size == ra->async_size) {
                ra->async_size = get_next_ra_size(ra, max_pages);
                ra->size += ra->async_size;
        }

        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /* be dumb */
        if (filp && (filp->f_mode & FMODE_RANDOM)) {
                force_page_cache_readahead(mapping, filp, offset, req_size);
                return;
        }

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (inode_read_congested(mapping->host))
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);

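/*
 * Sketch of how a read path is expected to drive the two hooks above (the
 * buffered read loop in mm/filemap.c follows roughly this shape; the code
 * below is illustrative, not a copy of it):
 *
 *      page = find_get_page(mapping, index);
 *      if (!page) {
 *              page_cache_sync_readahead(mapping, ra, filp, index, req_pages);
 *              page = find_get_page(mapping, index);
 *      } else if (PageReadahead(page)) {
 *              page_cache_async_readahead(mapping, ra, filp, page, index,
 *                                         req_pages);
 *      }
 */
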
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
             pgoff_t index, unsigned long nr)
{
        if (!mapping || !mapping->a_ops)
                return -EINVAL;

        /*
         * Readahead doesn't make sense for DAX inodes, but we don't want it
         * to report a failure either.  Instead, we just return success and
         * don't do any work.
         */
        if (dax_mapping(mapping))
                return 0;

        return force_page_cache_readahead(mapping, filp, index, nr);
}

ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
        ssize_t ret;
        struct fd f;

        ret = -EBADF;
        f = fdget(fd);
        if (f.file) {
                if (f.file->f_mode & FMODE_READ) {
                        struct address_space *mapping = f.file->f_mapping;
                        pgoff_t start = offset >> PAGE_SHIFT;
                        pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
                        unsigned long len = end - start + 1;
                        ret = do_readahead(mapping, f.file, start, len);
                }
                fdput(f);
        }
        return ret;
}

SYSCALL_DEFINE3(readahead, int, fd, loff_t, offset, size_t, count)
{
        return ksys_readahead(fd, offset, count);
}
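
/*
 * From userspace the syscall above is reached through the readahead(2)
 * wrapper; a minimal, illustrative caller looks like this:
 *
 *      #define _GNU_SOURCE
 *      #include <fcntl.h>
 *
 *      int fd = open("datafile", O_RDONLY);
 *      if (fd >= 0)
 *              readahead(fd, 0, 2 * 1024 * 1024);   // prefetch first 2MB
 *
 * which lands in ksys_readahead() and ultimately
 * force_page_cache_readahead(), populating the page cache ahead of the
 * subsequent read()s.
 */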