/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/*
 * see if a page needs releasing upon read_cache_pages() failure
 * - the caller of read_cache_pages() may have set PG_private or PG_fscache
 *   before calling, such as the NFS fs marking pages that are cached locally
 *   on disk, thus we need to give the fs a chance to clean up in the event of
 *   an error
 */
static void read_cache_pages_invalidate_page(struct address_space *mapping,
					     struct page *page)
{
	if (page_has_private(page)) {
		if (!trylock_page(page))
			BUG();
		page->mapping = mapping;
		do_invalidatepage(page, 0);
		page->mapping = NULL;
		unlock_page(page);
	}
	page_cache_release(page);
}

/*
 * release a list of pages, invalidating them first if need be
 */
static void read_cache_pages_invalidate_pages(struct address_space *mapping,
					      struct list_head *pages)
{
	struct page *victim;

	while (!list_empty(pages)) {
		victim = list_to_page(pages);
		list_del(&victim->lru);
		read_cache_pages_invalidate_page(mapping, victim);
	}
}

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	int ret = 0;

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			read_cache_pages_invalidate_page(mapping, page);
			continue;
		}
		page_cache_release(page);

		ret = filler(data, page);
		if (unlikely(ret)) {
			read_cache_pages_invalidate_pages(mapping, pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);

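/*
 * Illustrative usage sketch (an assumption, not code from this file): a
 * filesystem that has built a list of pages to read might drive
 * read_cache_pages() roughly like this, where my_fs_readpage() is a
 * hypothetical helper that reads one page and returns 0 or -errno:
 *
 *	static int my_fs_filler(void *data, struct page *page)
 *	{
 *		return my_fs_readpage((struct file *)data, page);
 *	}
 *
 *	ret = read_cache_pages(mapping, &page_list, my_fs_filler, file);
 *
 * If the filler fails, the pages still left on &page_list have already been
 * invalidated and released by the time read_cache_pages() returns.
 */
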
static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache_lru(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
		}
		page_cache_release(page);
	}
	ret = 0;
out:
	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}

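/*
 * Illustrative note on lookahead_size: the page at index
 * (offset + nr_to_read - lookahead_size) is the one tagged PG_readahead
 * above.  For example, nr_to_read = 8 with lookahead_size = 4 marks the
 * 5th page of the window, so asynchronous readahead is triggered while
 * 4 pages of the current window are still unread.
 */
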
/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	nr_to_read = max_sane_readahead(nr_to_read);
	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}

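/*
 * Illustrative note: with the common 4 KiB PAGE_CACHE_SIZE, each loop
 * iteration above covers at most (2 * 1024 * 1024) / 4096 = 512 pages.
 */
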
/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE_FILE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of
 * two, then scale it up - x4 for small requests, x2 for medium ones - and
 * cap the result at the maximum readahead size (e.g. 128k / 32 pages).
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;

	return newsize;
}

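/*
 * Worked example (illustrative, assuming max = 32 pages, i.e. a 128 KiB
 * window with 4 KiB pages): a 1 page request becomes 4 pages; a 3 page
 * request rounds up to 4 and doubles to 8 pages; a 6 page request rounds
 * up to 8 and doubles to 16 pages; anything that rounds above max / 4 = 8
 * pages jumps straight to the 32 page maximum.
 */
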
/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}

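/*
 * Worked example (illustrative, assuming max = 32 pages): a previous
 * window of 1 page grows to 4, a 4 page window doubles to 8, 8 doubles
 * to 16 and 16 doubles to 32, after which the window stays pinned at the
 * 32 page maximum.
 */
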
/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as the
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special-case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	unsigned long max = max_sane_readahead(ra->ra_pages);
	pgoff_t prev_offset;
	int sequential;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
			offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
	sequential = offset - prev_offset <= 1UL || req_size > max;

	/*
	 * Standalone, small read.
	 * Read as is, and do not pollute the readahead state.
	 */
	if (!hit_readahead_marker && !sequential) {
		return __do_page_cache_readahead(mapping, filp,
						offset, req_size, 0);
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		rcu_read_lock();
		start = radix_tree_next_hole(&mapping->page_tree, offset, max + 1);
		rcu_read_unlock();

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * It may be one of
	 *  - first read on start of file
	 *  - sequential cache miss
	 *  - oversize random read
	 * Start readahead for it.
	 */
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	return ra_submit(ra, mapping, filp);
}

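/*
 * Illustrative walk-through (assuming a freshly initialised ra state and
 * max = 32): a 4 page read at offset 0 takes the "initial window" branch,
 * so ra->start = 0, ra->size = get_init_ra_size(4, 32) = 8 and
 * ra->async_size = 8 - 4 = 4, with PG_readahead set on the page at index 4.
 * When the application later reaches that marked page, the sequential
 * branch fires: the window slides to ra->start = 8 and grows to
 * ra->size = ra->async_size = 16, and so on until it saturates at max.
 */
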
/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss has
 * happened: it will submit the read.  The readahead logic may decide to
 * piggyback more pages onto the read request if access patterns suggest it
 * will improve performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
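
/*
 * Illustrative caller sketch (an assumption about how a generic buffered
 * read path is expected to use the two entry points; not code from this
 * file):
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		page_cache_async_readahead(mapping, ra, filp, page,
 *					   index, last_index - index);
 *	}
 */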