/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
        .ra_pages       = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
        .state          = 0,
        .capabilities   = BDI_CAP_MAP_COPY,
        .unplug_io_fn   = default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
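
/*
 * Note (illustrative, assuming the usual VM_MAX_READAHEAD of 128 kbytes and
 * a 4 KiB PAGE_CACHE_SIZE): .ra_pages above works out to 32 pages, i.e. a
 * 128 KiB default maximum readahead window per file.
 */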

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
        ra->ra_pages = mapping->backing_dev_info->ra_pages;
        ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
                        int (*filler)(void *, struct page *), void *data)
{
        struct page *page;
        struct pagevec lru_pvec;
        int ret = 0;

        pagevec_init(&lru_pvec, 0);

        while (!list_empty(pages)) {
                page = list_to_page(pages);
                list_del(&page->lru);
                if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
                        page_cache_release(page);
                        continue;
                }
                ret = filler(data, page);
                if (!pagevec_add(&lru_pvec, page))
                        __pagevec_lru_add(&lru_pvec);
                if (ret) {
                        put_pages_list(pages);
                        break;
                }
                task_io_account_read(PAGE_CACHE_SIZE);
        }
        pagevec_lru_add(&lru_pvec);
        return ret;
}

EXPORT_SYMBOL(read_cache_pages);
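
/*
 * Illustrative sketch (not part of this file; the names are hypothetical):
 * a filesystem's ->readpages() can drive read_cache_pages() like
 *
 *      static int examplefs_readpages(struct file *filp,
 *                      struct address_space *mapping,
 *                      struct list_head *pages, unsigned nr_pages)
 *      {
 *              return read_cache_pages(mapping, pages,
 *                                      examplefs_fill_page, filp);
 *      }
 *
 * where examplefs_fill_page(data, page) issues the read for one page and
 * returns 0 on success.
 */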

static int read_pages(struct address_space *mapping, struct file *filp,
                struct list_head *pages, unsigned nr_pages)
{
        unsigned page_idx;
        struct pagevec lru_pvec;
        int ret;

        if (mapping->a_ops->readpages) {
                ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
                /* Clean up the remaining pages */
                put_pages_list(pages);
                goto out;
        }

        pagevec_init(&lru_pvec, 0);
        for (page_idx = 0; page_idx < nr_pages; page_idx++) {
                struct page *page = list_to_page(pages);
                list_del(&page->lru);
                if (!add_to_page_cache(page, mapping,
                                        page->index, GFP_KERNEL)) {
                        mapping->a_ops->readpage(filp, page);
                        if (!pagevec_add(&lru_pvec, page))
                                __pagevec_lru_add(&lru_pvec);
                } else
                        page_cache_release(page);
        }
        pagevec_lru_add(&lru_pvec);
        ret = 0;
out:
        return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read,
                        unsigned long lookahead_size)
{
        struct inode *inode = mapping->host;
        struct page *page;
        unsigned long end_index;        /* The last page we want to read */
        LIST_HEAD(page_pool);
        int page_idx;
        int ret = 0;
        loff_t isize = i_size_read(inode);

        if (isize == 0)
                goto out;

        end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

        /*
         * Preallocate as many pages as we will need.
         */
        read_lock_irq(&mapping->tree_lock);
        for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
                pgoff_t page_offset = offset + page_idx;

                if (page_offset > end_index)
                        break;

                page = radix_tree_lookup(&mapping->page_tree, page_offset);
                if (page)
                        continue;

                read_unlock_irq(&mapping->tree_lock);
                page = page_cache_alloc_cold(mapping);
                read_lock_irq(&mapping->tree_lock);
                if (!page)
                        break;
                page->index = page_offset;
                list_add(&page->lru, &page_pool);
                if (page_idx == nr_to_read - lookahead_size)
                        SetPageReadahead(page);
                ret++;
        }
        read_unlock_irq(&mapping->tree_lock);

        /*
         * Now start the IO.  We ignore I/O errors - if the page is not
         * uptodate then the caller will launch readpage again, and
         * will then handle the error.
         */
        if (ret)
                read_pages(mapping, filp, &page_pool, ret);
        BUG_ON(!list_empty(&page_pool));
out:
        return ret;
}

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
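/*
 * For example (assuming 4 KiB pages): this_chunk below is 512 pages, so a
 * 10 MiB request - e.g. one coming in via fadvise(POSIX_FADV_WILLNEED) - is
 * issued as five batches of 512 pages each.
 */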
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
                pgoff_t offset, unsigned long nr_to_read)
{
        int ret = 0;

        if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
                return -EINVAL;

        while (nr_to_read) {
                int err;

                unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

                if (this_chunk > nr_to_read)
                        this_chunk = nr_to_read;
                err = __do_page_cache_readahead(mapping, filp,
                                                offset, this_chunk, 0);
                if (err < 0) {
                        ret = err;
                        break;
                }
                ret += err;
                offset += this_chunk;
                nr_to_read -= this_chunk;
        }
        return ret;
}

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
                        pgoff_t offset, unsigned long nr_to_read)
{
        if (bdi_read_congested(mapping->backing_dev_info))
                return -1;

        return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
        return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
                + node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}
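
/*
 * E.g. (illustrative numbers) on a node with 60000 inactive and 20000 free
 * pages, a request for 100000 readahead pages is clamped to 40000 pages -
 * half of what is free or easily reclaimable.
 */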

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
                       struct address_space *mapping, struct file *filp)
{
        int actual;

        actual = __do_page_cache_readahead(mapping, filp,
                                        ra->start, ra->size, ra->async_size);

        return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up - x4 for small requests, x2 for medium ones - and clamp
 * the result to the max readahead size.
 */
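/*
 * Worked example (assuming the default 128 KiB / 32 page maximum): a 1 page
 * request starts with a 4 page (16 KiB) window, an 8 page request with a
 * 16 page (64 KiB) window, and anything larger than 8 pages gets the full
 * 32 page (128 KiB) window straight away.
 */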
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
        unsigned long newsize = roundup_pow_of_two(size);

        if (newsize <= max / 32)
                newsize = newsize * 4;
        else if (newsize <= max / 4)
                newsize = newsize * 2;
        else
                newsize = max;

        return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
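/*
 * E.g. with a 32 page maximum, a 1 page window ramps 1 -> 4 -> 8 -> 16 -> 32
 * across successive sequential hits (x4 while below max/16, x2 afterwards,
 * clamped to max).
 */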
static unsigned long get_next_ra_size(struct file_ra_state *ra,
                                                unsigned long max)
{
        unsigned long cur = ra->size;
        unsigned long newsize;

        if (cur < max / 16)
                newsize = 4 * cur;
        else
                newsize = 2 * cur;

        return min(newsize, max);
}

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size --------|
 *     |------------------- size ------------------->|
 *     |==================#==========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * Instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * be invalidating each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as the
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and the max readahead size.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */
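
/*
 * Worked example of the pipelining (illustrative, assuming a 32 page max and
 * a purely sequential reader): a first read of 16 pages at offset 0 sets
 * start=0, size=32, async_size=16, and the page at index 16 is marked with
 * PG_readahead.  When the application later touches that marked page,
 * page_cache_async_readahead() advances the window (start=32, size and
 * async_size ramped by get_next_ra_size()), so the I/O for the next window
 * overlaps consumption of the current one.
 */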

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
                   struct file_ra_state *ra, struct file *filp,
                   bool hit_readahead_marker, pgoff_t offset,
                   unsigned long req_size)
{
        int max = ra->ra_pages;         /* max readahead pages */
        pgoff_t prev_offset;
        int sequential;

        /*
         * It's the expected callback offset, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
        if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
                        offset == (ra->start + ra->size))) {
                ra->start += ra->size;
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
        sequential = offset - prev_offset <= 1UL || req_size > max;

        /*
         * Standalone, small read.
         * Read as is, and do not pollute the readahead state.
         */
        if (!hit_readahead_marker && !sequential) {
                return __do_page_cache_readahead(mapping, filp,
                                                offset, req_size, 0);
        }

        /*
         * Hit a marked page without valid readahead state.
         * E.g. interleaved reads.
         * Query the pagecache for async_size, which normally equals the
         * readahead size.  Ramp it up and use it as the new readahead size.
         */
        if (hit_readahead_marker) {
                pgoff_t start;

                read_lock_irq(&mapping->tree_lock);
                start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
                read_unlock_irq(&mapping->tree_lock);

                if (!start || start - offset > max)
                        return 0;

                ra->start = start;
                ra->size = start - offset;      /* old async_size */
                ra->size = get_next_ra_size(ra, max);
                ra->async_size = ra->size;
                goto readit;
        }

        /*
         * It may be one of
         *      - first read on start of file
         *      - sequential cache miss
         *      - oversize random read
         * Start readahead for it.
         */
        ra->start = offset;
        ra->size = get_init_ra_size(req_size, max);
        ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
        return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
                               struct file_ra_state *ra, struct file *filp,
                               pgoff_t offset, unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag: this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
                           struct file_ra_state *ra, struct file *filp,
                           struct page *page, pgoff_t offset,
                           unsigned long req_size)
{
        /* no read-ahead */
        if (!ra->ra_pages)
                return;

        /*
         * Same bit is used for PG_readahead and PG_reclaim.
         */
        if (PageWriteback(page))
                return;

        ClearPageReadahead(page);

        /*
         * Defer asynchronous read-ahead on IO congestion.
         */
        if (bdi_read_congested(mapping->backing_dev_info))
                return;

        /* do read-ahead */
        ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
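
/*
 * Illustrative usage sketch (not part of this file; loosely modelled on the
 * generic read path): a caller walking a file's pages in order would
 * typically do
 *
 *      page = find_get_page(mapping, index);
 *      if (!page) {
 *              page_cache_sync_readahead(mapping, ra, filp,
 *                                        index, last_index - index);
 *              page = find_get_page(mapping, index);
 *      } else if (PageReadahead(page)) {
 *              page_cache_async_readahead(mapping, ra, filp,
 *                                         page, index, last_index - index);
 *      }
 *
 * where ra is the file's file_ra_state and last_index is one past the last
 * page the caller intends to read.
 */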