/*
 * linux/mm/filemap.c
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

/*
 * This file handles the generic file mmap semantics used by
 * most "normal" filesystems (but you don't /have/ to use this:
 * the NFS filesystem used to do this differently, for example)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/compiler.h>
#include <linux/fs.h>
#include <linux/aio.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include "filemap.h"
/*
 * FIXME: remove all knowledge of the buffer layer from the core VM
 */
#include <linux/buffer_head.h> /* for generic_osync_inode */

#include <asm/uaccess.h>
#include <asm/mman.h>

static ssize_t
generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
	loff_t offset, unsigned long nr_segs);

/*
 * Shared mappings implemented 30.11.1994. It's not fully working yet,
 * though.
 *
 * Shared mappings now work. 15.8.1995  Bruno.
 *
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 *
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 */

/*
 * Lock ordering:
 *
 *  ->i_mmap_lock		(vmtruncate)
 *    ->private_lock		(__free_pte->__set_page_dirty_buffers)
 *      ->swap_lock		(exclusive_swap_page, others)
 *        ->mapping->tree_lock
 *
 *  ->i_mutex
 *    ->i_mmap_lock		(truncate->unmap_mapping_range)
 *
 *  ->mmap_sem
 *    ->i_mmap_lock
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->mapping->tree_lock	(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_sem
 *    ->lock_page		(access_process_vm)
 *
 *  ->mmap_sem
 *    ->i_mutex			(msync)
 *
 *  ->i_mutex
 *    ->i_alloc_sem		(various)
 *
 *  ->inode_lock
 *    ->sb_lock			(fs/fs-writeback.c)
 *    ->mapping->tree_lock	(__sync_single_inode)
 *
 *  ->i_mmap_lock
 *    ->anon_vma.lock		(vma_adjust)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock		(try_to_unmap_one)
 *    ->private_lock		(try_to_unmap_one)
 *    ->tree_lock		(try_to_unmap_one)
 *    ->zone.lru_lock		(follow_page->mark_page_accessed)
 *    ->private_lock		(page_remove_rmap->set_page_dirty)
 *    ->tree_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(page_remove_rmap->set_page_dirty)
 *    ->inode_lock		(zap_pte_range->set_page_dirty)
 *    ->private_lock		(zap_pte_range->__set_page_dirty_buffers)
 *
 *  ->task->proc_lock
 *    ->dcache_lock		(proc_pid_lookup)
 */

/*
 * Remove a page from the page cache and free it. Caller has to make
 * sure the page is locked and that nobody else uses it - or that usage
 * is safe.  The caller must hold a write_lock on the mapping's tree_lock.
 */
void __remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	pagecache_acct(-1);
}

void remove_from_page_cache(struct page *page)
{
	struct address_space *mapping = page->mapping;

	BUG_ON(!PageLocked(page));

	write_lock_irq(&mapping->tree_lock);
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
}

static int sync_page(void *word)
{
	struct address_space *mapping;
	struct page *page;

	page = container_of((unsigned long *)word, struct page, flags);

	/*
	 * page_mapping() is being called without PG_locked held.
	 * Some knowledge of the state and use of the page is used to
	 * reduce the requirements down to a memory barrier.
	 * The danger here is of a stale page_mapping() return value
	 * indicating a struct address_space different from the one it's
	 * associated with when it is associated with one.
	 * After smp_mb(), it's either the correct page_mapping() for
	 * the page, or an old page_mapping() and the page's own
	 * page_mapping() has gone NULL.
	 * The ->sync_page() address_space operation must tolerate
	 * page_mapping() going NULL. By an amazing coincidence,
	 * this comes about because none of the users of the page
	 * in the ->sync_page() methods make essential use of the
	 * page_mapping(), merely passing the page down to the backing
	 * device's unplug functions when it's non-NULL, which in turn
	 * ignore it for all cases but swap, where only page_private(page) is
	 * of interest. When page_mapping() does go NULL, the entire
	 * call stack gracefully ignores the page and returns.
	 * -- wli
	 */
	smp_mb();
	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->sync_page)
		mapping->a_ops->sync_page(page);
	io_schedule();
	return 0;
}

/**
 * filemap_fdatawrite_range - start writeback against all of a mapping's
 * dirty pages that lie within the byte offsets <start, end>
 * @mapping:	address space structure to write
 * @start:	offset in bytes where the range starts
 * @end:	offset in bytes where the range ends
 * @sync_mode:	enable synchronous operation
 *
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
 * opposed to a regular memory cleansing writeback.  The difference between
 * these two operations is that if a dirty page/buffer is encountered, it must
 * be waited upon, and not just skipped over.
 */
static int __filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end, int sync_mode)
{
	int ret;
	struct writeback_control wbc = {
		.sync_mode = sync_mode,
		.nr_to_write = mapping->nrpages * 2,
		.start = start,
		.end = end,
	};

	if (!mapping_cap_writeback_dirty(mapping))
		return 0;

	ret = do_writepages(mapping, &wbc);
	return ret;
}

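/*
 * Convenience wrapper: start writeback on the whole mapping with the
 * given sync mode (used by filemap_fdatawrite() and filemap_flush()).
 */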
static inline int __filemap_fdatawrite(struct address_space *mapping,
	int sync_mode)
{
	return __filemap_fdatawrite_range(mapping, 0, 0, sync_mode);
}

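/*
 * Start writeback against all of the mapping's dirty pages, in
 * data-integrity (WB_SYNC_ALL) mode.
 */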
int filemap_fdatawrite(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);
}
EXPORT_SYMBOL(filemap_fdatawrite);

static int filemap_fdatawrite_range(struct address_space *mapping,
	loff_t start, loff_t end)
{
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
}

/*
 * This is a mostly non-blocking flush.  Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
 */
int filemap_flush(struct address_space *mapping)
{
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
}
EXPORT_SYMBOL(filemap_flush);

/*
 * Wait for writeback to complete against pages indexed by start->end
 * inclusive
 */
static int wait_on_page_writeback_range(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	int nr_pages;
	int ret = 0;
	pgoff_t index;

	if (end < start)
		return 0;

	pagevec_init(&pvec, 0);
	index = start;
	while ((index <= end) &&
			(nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
			PAGECACHE_TAG_WRITEBACK,
			min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1)) != 0) {
		unsigned i;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/* until radix tree lookup accepts end_index */
			if (page->index > end)
				continue;

			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Check for outstanding write errors */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

	return ret;
}

/*
 * Write and wait upon all the pages in the passed range.  This is a "data
 * integrity" operation.  It waits upon in-flight writeout before starting and
 * waiting upon new writeout.  If there was an IO error, return it.
 *
 * We need to re-take i_mutex during the generic_osync_inode list walk because
 * it is otherwise livelockable.
 */
int sync_page_range(struct inode *inode, struct address_space *mapping,
			loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0) {
		mutex_lock(&inode->i_mutex);
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
		mutex_unlock(&inode->i_mutex);
	}
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range);

/*
 * Note: Holding i_mutex across sync_page_range_nolock is not a good idea
 * as it forces O_SYNC writers to different parts of the same file
 * to be serialised right until io completion.
 */
int sync_page_range_nolock(struct inode *inode, struct address_space *mapping,
			   loff_t pos, loff_t count)
{
	pgoff_t start = pos >> PAGE_CACHE_SHIFT;
	pgoff_t end = (pos + count - 1) >> PAGE_CACHE_SHIFT;
	int ret;

	if (!mapping_cap_writeback_dirty(mapping) || !count)
		return 0;
	ret = filemap_fdatawrite_range(mapping, pos, pos + count - 1);
	if (ret == 0)
		ret = generic_osync_inode(inode, mapping, OSYNC_METADATA);
	if (ret == 0)
		ret = wait_on_page_writeback_range(mapping, start, end);
	return ret;
}
EXPORT_SYMBOL(sync_page_range_nolock);

/**
 * filemap_fdatawait - walk the list of under-writeback pages of the given
 *     address space and wait for all of them.
 *
 * @mapping: address space structure to wait for
 */
int filemap_fdatawait(struct address_space *mapping)
{
	loff_t i_size = i_size_read(mapping->host);

	if (i_size == 0)
		return 0;

	return wait_on_page_writeback_range(mapping, 0,
				(i_size - 1) >> PAGE_CACHE_SHIFT);
}
EXPORT_SYMBOL(filemap_fdatawait);

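/*
 * Write out the mapping's dirty pages and then wait for the writeback to
 * complete.  The first error encountered is returned; see the comment below
 * for why -EIO from the write skips the wait.
 */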
int filemap_write_and_wait(struct address_space *mapping)
{
	int err = 0;

	if (mapping->nrpages) {
		err = filemap_fdatawrite(mapping);
		/*
		 * Even if the above returned error, the pages may be
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		 * thing (e.g. bug) happened, so we avoid waiting for it.
		 */
		if (err != -EIO) {
			int err2 = filemap_fdatawait(mapping);
			if (!err)
				err = err2;
		}
	}
	return err;
}
EXPORT_SYMBOL(filemap_write_and_wait);

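/*
 * Like filemap_write_and_wait(), but restricted to the byte range
 * lstart..lend.
 */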
int filemap_write_and_wait_range(struct address_space *mapping,
				 loff_t lstart, loff_t lend)
{
	int err = 0;

	if (mapping->nrpages) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
						 WB_SYNC_ALL);
		/* See comment of filemap_write_and_wait() */
		if (err != -EIO) {
			int err2 = wait_on_page_writeback_range(mapping,
						lstart >> PAGE_CACHE_SHIFT,
						lend >> PAGE_CACHE_SHIFT);
			if (!err)
				err = err2;
		}
	}
	return err;
}

/*
 * This function is used to add newly allocated pagecache pages:
 * the page is new, so we can just run SetPageLocked() against it.
 * The other page state flags were set by rmqueue().
 *
 * This function does not add the page to the LRU.  The caller must do that.
 */
int add_to_page_cache(struct page *page, struct address_space *mapping,
		pgoff_t offset, gfp_t gfp_mask)
{
	int error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM);

	if (error == 0) {
		write_lock_irq(&mapping->tree_lock);
		error = radix_tree_insert(&mapping->page_tree, offset, page);
		if (!error) {
			page_cache_get(page);
			SetPageLocked(page);
			page->mapping = mapping;
			page->index = offset;
			mapping->nrpages++;
			pagecache_acct(1);
		}
		write_unlock_irq(&mapping->tree_lock);
		radix_tree_preload_end();
	}
	return error;
}

EXPORT_SYMBOL(add_to_page_cache);

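/*
 * Like add_to_page_cache(), but also puts the newly added page on the LRU
 * via lru_cache_add().
 */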
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t offset, gfp_t gfp_mask)
{
	int ret = add_to_page_cache(page, mapping, offset, gfp_mask);
	if (ret == 0)
		lru_cache_add(page);
	return ret;
}

/*
 * In order to wait for pages to become available there must be
 * waitqueues associated with pages. By using a hash table of
 * waitqueues where the bucket discipline is to maintain all
 * waiters on the same queue and wake all when any of the pages
 * become available, and for the woken contexts to check to be
 * sure the appropriate page became available, this saves space
 * at a cost of "thundering herd" phenomena during rare hash
 * collisions.
 */
static wait_queue_head_t *page_waitqueue(struct page *page)
{
	const struct zone *zone = page_zone(page);

	return &zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
}

static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

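/*
 * Sleep until the given page-flags bit is clear.  sync_page() is used as
 * the wait action so the backing device gets unplugged while we wait.
 */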
void fastcall wait_on_page_bit(struct page *page, int bit_nr)
{
	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);

	if (test_bit(bit_nr, &page->flags))
		__wait_on_bit(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(wait_on_page_bit);

/**
 * unlock_page() - unlock a locked page
 *
 * @page: the page
 *
 * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
 * Also wakes sleepers in wait_on_page_writeback() because the wakeup
 * mechanism between PageLocked pages and PageWriteback pages is shared.
 * But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
 *
 * The first mb is necessary to safely close the critical section opened by the
 * TestSetPageLocked(), the second mb is necessary to enforce ordering between
 * the clear_bit and the read of the waitqueue (to avoid SMP races with a
 * parallel wait_on_page_locked()).
 */
void fastcall unlock_page(struct page *page)
{
	smp_mb__before_clear_bit();
	if (!TestClearPageLocked(page))
		BUG();
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

/*
 * End writeback against a page.
 */
void end_page_writeback(struct page *page)
{
	if (!TestClearPageReclaim(page) || rotate_reclaimable_page(page)) {
		if (!test_clear_page_writeback(page))
			BUG();
	}
	smp_mb__after_clear_bit();
	wake_up_page(page, PG_writeback);
}
EXPORT_SYMBOL(end_page_writeback);

/*
 * Get a lock on the page, assuming we need to sleep to get it.
 *
 * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
 * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
 * chances are that on the second loop, the block layer's plug list is empty,
 * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
 */
void fastcall __lock_page(struct page *page)
{
	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);

	__wait_on_bit_lock(page_waitqueue(page), &wait, sync_page,
							TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(__lock_page);

/*
 * a rather lightweight function, finding and getting a reference to a
 * hashed page atomically.
 */
struct page * find_get_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page)
		page_cache_get(page);
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_get_page);

/*
 * Same as above, but trylock it instead of incrementing the count.
 */
struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page && TestSetPageLocked(page))
		page = NULL;
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_trylock_page);

/**
 * find_lock_page - locate, pin and lock a pagecache page
 *
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Locates the desired pagecache page, locks it, increments its reference
 * count and returns its address.
 *
 * Returns zero if the page was not present. find_lock_page() may sleep.
 */
struct page *find_lock_page(struct address_space *mapping,
				unsigned long offset)
{
	struct page *page;

	read_lock_irq(&mapping->tree_lock);
repeat:
	page = radix_tree_lookup(&mapping->page_tree, offset);
	if (page) {
		page_cache_get(page);
		if (TestSetPageLocked(page)) {
			read_unlock_irq(&mapping->tree_lock);
			__lock_page(page);
			read_lock_irq(&mapping->tree_lock);

			/* Has the page been truncated while we slept? */
			if (unlikely(page->mapping != mapping ||
				     page->index != offset)) {
				unlock_page(page);
				page_cache_release(page);
				goto repeat;
			}
		}
	}
	read_unlock_irq(&mapping->tree_lock);
	return page;
}

EXPORT_SYMBOL(find_lock_page);

/**
 * find_or_create_page - locate or add a pagecache page
 *
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Locates a page in the pagecache.  If the page is not present, a new page
 * is allocated using @gfp_mask and is added to the pagecache and to the VM's
 * LRU list.  The returned page is locked and has its reference count
 * incremented.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an atomic
 * allocation!
 *
 * find_or_create_page() returns the desired page's address, or zero on
 * memory exhaustion.
 */
struct page *find_or_create_page(struct address_space *mapping,
		unsigned long index, gfp_t gfp_mask)
{
	struct page *page, *cached_page = NULL;
	int err;
repeat:
	page = find_lock_page(mapping, index);
	if (!page) {
		if (!cached_page) {
			cached_page = alloc_page(gfp_mask);
			if (!cached_page)
				return NULL;
		}
		err = add_to_page_cache_lru(cached_page, mapping,
					index, gfp_mask);
		if (!err) {
			page = cached_page;
			cached_page = NULL;
		} else if (err == -EEXIST)
			goto repeat;
	}
	if (cached_page)
		page_cache_release(cached_page);
	return page;
}

EXPORT_SYMBOL(find_or_create_page);

/**
 * find_get_pages - gang pagecache lookup
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 * @pages:	Where the resulting pages are placed
 *
 * find_get_pages() will search for and return a group of up to
 * @nr_pages pages in the mapping.  The pages are placed at @pages.
 * find_get_pages() takes a reference against the returned pages.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * find_get_pages() returns the number of pages which were found.
 */
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			    unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup(&mapping->page_tree,
				(void **)pages, start, nr_pages);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Like find_get_pages, except we only return pages which are tagged with
 * `tag'.   We update *index to index the next page for the traversal.
 */
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages)
{
	unsigned int i;
	unsigned int ret;

	read_lock_irq(&mapping->tree_lock);
	ret = radix_tree_gang_lookup_tag(&mapping->page_tree,
				(void **)pages, *index, nr_pages, tag);
	for (i = 0; i < ret; i++)
		page_cache_get(pages[i]);
	if (ret)
		*index = pages[ret - 1]->index + 1;
	read_unlock_irq(&mapping->tree_lock);
	return ret;
}

/*
 * Same as grab_cache_page, but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
struct page *
grab_cache_page_nowait(struct address_space *mapping, unsigned long index)
{
	struct page *page = find_get_page(mapping, index);
	gfp_t gfp_mask;

	if (page) {
		if (!TestSetPageLocked(page))
			return page;
		page_cache_release(page);
		return NULL;
	}
	gfp_mask = mapping_gfp_mask(mapping) & ~__GFP_FS;
	page = alloc_pages(gfp_mask, 0);
	if (page && add_to_page_cache_lru(page, mapping, index, gfp_mask)) {
		page_cache_release(page);
		page = NULL;
	}
	return page;
}

EXPORT_SYMBOL(grab_cache_page_nowait);

/*
 * This is a generic file read routine, and uses the
 * mapping->a_ops->readpage() function for the actual low-level
 * stuff.
 *
 * This is really ugly. But the goto's actually try to clarify some
 * of the logic when it comes to error handling etc.
 *
 * Note the struct file* is only passed for the use of readpage.  It may be
 * NULL.
 */
void do_generic_mapping_read(struct address_space *mapping,
			     struct file_ra_state *_ra,
			     struct file *filp,
			     loff_t *ppos,
			     read_descriptor_t *desc,
			     read_actor_t actor)
{
	struct inode *inode = mapping->host;
	unsigned long index;
	unsigned long end_index;
	unsigned long offset;
	unsigned long last_index;
	unsigned long next_index;
	unsigned long prev_index;
	loff_t isize;
	struct page *cached_page;
	int error;
	struct file_ra_state ra = *_ra;

	cached_page = NULL;
	index = *ppos >> PAGE_CACHE_SHIFT;
	next_index = index;
	prev_index = ra.prev_page;
	last_index = (*ppos + desc->count + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	offset = *ppos & ~PAGE_CACHE_MASK;

	isize = i_size_read(inode);
	if (!isize)
		goto out;

	end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
	for (;;) {
		struct page *page;
		unsigned long nr, ret;

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index >= end_index) {
			if (index > end_index)
				goto out;
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				goto out;
			}
		}
		nr = nr - offset;

		cond_resched();
		if (index == next_index)
			next_index = page_cache_readahead(mapping, &ra, filp,
					index, last_index - index);

find_page:
		page = find_get_page(mapping, index);
		if (unlikely(page == NULL)) {
			handle_ra_miss(mapping, &ra, index);
			goto no_cached_page;
		}
		if (!PageUptodate(page))
			goto page_not_up_to_date;
page_ok:

		/* If users can be writing to this page using arbitrary
		 * virtual addresses, take care about potential aliasing
		 * before reading the page on the kernel side.
		 */
		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		/*
		 * When (part of) the same page is read multiple times
		 * in succession, only mark it as accessed the first time.
		 */
		if (prev_index != index)
			mark_page_accessed(page);
		prev_index = index;

		/*
		 * Ok, we have the page, and it's up-to-date, so
		 * now we can copy it to user space...
		 *
		 * The actor routine returns how many bytes were actually used..
		 * NOTE! This may not be the same as how much of a user buffer
		 * we filled up (we may be padding etc), so we can only update
		 * "pos" here (the actor routine has to update the user buffer
		 * pointers and the remaining count).
		 */
		ret = actor(desc, page, offset, nr);
		offset += ret;
		index += offset >> PAGE_CACHE_SHIFT;
		offset &= ~PAGE_CACHE_MASK;

		page_cache_release(page);
		if (ret == nr && desc->count)
			continue;
		goto out;

page_not_up_to_date:
		/* Get exclusive access to the page ... */
		lock_page(page);

		/* Did it get unhashed before we got the lock? */
		if (!page->mapping) {
			unlock_page(page);
			page_cache_release(page);
			continue;
		}

		/* Did somebody else fill it already? */
		if (PageUptodate(page)) {
			unlock_page(page);
			goto page_ok;
		}

readpage:
		/* Start the actual read. The read will unlock the page. */
		error = mapping->a_ops->readpage(filp, page);

		if (unlikely(error)) {
			if (error == AOP_TRUNCATED_PAGE) {
				page_cache_release(page);
				goto find_page;
			}
			goto readpage_error;
		}

		if (!PageUptodate(page)) {
			lock_page(page);
			if (!PageUptodate(page)) {
				if (page->mapping == NULL) {
					/*
					 * invalidate_inode_pages got it
					 */
					unlock_page(page);
					page_cache_release(page);
					goto find_page;
				}
				unlock_page(page);
				error = -EIO;
				goto readpage_error;
			}
			unlock_page(page);
		}

		/*
		 * i_size must be checked after we have done ->readpage.
		 *
		 * Checking i_size after the readpage allows us to calculate
		 * the correct value for "nr", which means the zero-filled
		 * part of the page is not copied back to userspace (unless
		 * another truncate extends the file - this is desired though).
		 */
		isize = i_size_read(inode);
		end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
		if (unlikely(!isize || index > end_index)) {
			page_cache_release(page);
			goto out;
		}

		/* nr is the maximum number of bytes to copy from this page */
		nr = PAGE_CACHE_SIZE;
		if (index == end_index) {
			nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
			if (nr <= offset) {
				page_cache_release(page);
				goto out;
			}
		}
		nr = nr - offset;
		goto page_ok;

readpage_error:
		/* UHHUH! A synchronous read error occurred. Report it */
		desc->error = error;
		page_cache_release(page);
		goto out;

no_cached_page:
		/*
		 * Ok, it wasn't cached, so we need to create a new
		 * page..
		 */
		if (!cached_page) {
			cached_page = page_cache_alloc_cold(mapping);
			if (!cached_page) {
				desc->error = -ENOMEM;
				goto out;
			}
		}
		error = add_to_page_cache_lru(cached_page, mapping,
						index, GFP_KERNEL);
		if (error) {
			if (error == -EEXIST)
				goto find_page;
			desc->error = error;
			goto out;
		}
		page = cached_page;
		cached_page = NULL;
		goto readpage;
	}

out:
	*_ra = ra;

	*ppos = ((loff_t) index << PAGE_CACHE_SHIFT) + offset;
	if (cached_page)
		page_cache_release(cached_page);
	if (filp)
		file_accessed(filp);
}

EXPORT_SYMBOL(do_generic_mapping_read);

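/*
 * read_actor used by generic file reads: copy up to 'size' bytes from the
 * page at 'offset' into the user buffer described by 'desc', preferring an
 * atomic kmap copy and falling back to kmap() if that faults.
 */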
int file_read_actor(read_descriptor_t *desc, struct page *page,
			unsigned long offset, unsigned long size)
{
	char *kaddr;
	unsigned long left, count = desc->count;

	if (size > count)
		size = count;

	/*
	 * Faults on the destination of a read are common, so do it before
	 * taking the kmap.
	 */
	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
		kaddr = kmap_atomic(page, KM_USER0);
		left = __copy_to_user_inatomic(desc->arg.buf,
						kaddr + offset, size);
		kunmap_atomic(kaddr, KM_USER0);
		if (left == 0)
			goto success;
	}

	/* Do it the slow way */
	kaddr = kmap(page);
	left = __copy_to_user(desc->arg.buf, kaddr + offset, size);
	kunmap(page);

	if (left) {
		size -= left;
		desc->error = -EFAULT;
	}
success:
	desc->count = count - size;
	desc->written += size;
	desc->arg.buf += size;
	return size;
}

/*
 * This is the "read()" routine for all filesystems
 * that can use the page cache directly.
 */
ssize_t
__generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
		unsigned long nr_segs, loff_t *ppos)
{
	struct file *filp = iocb->ki_filp;
	ssize_t retval;
	unsigned long seg;
	size_t count;

	count = 0;
	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *iv = &iov[seg];

		/*
		 * If any segment has a negative length, or the cumulative
		 * length ever wraps negative then return -EINVAL.
		 */
		count += iv->iov_len;
		if (unlikely((ssize_t)(count|iv->iov_len) < 0))
			return -EINVAL;
		if (access_ok(VERIFY_WRITE, iv->iov_base, iv->iov_len))
			continue;
		if (seg == 0)
			return -EFAULT;
		nr_segs = seg;
		count -= iv->iov_len;	/* This segment is no good */
		break;
	}

	/* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
	if (filp->f_flags & O_DIRECT) {
		loff_t pos = *ppos, size;
		struct address_space *mapping;
		struct inode *inode;

		mapping = filp->f_mapping;
		inode = mapping->host;
		retval = 0;
		if (!count)
			goto out; /* skip atime */
		size = i_size_read(inode);
		if (pos < size) {
			retval = generic_file_direct_IO(READ, iocb,
						iov, pos, nr_segs);
			if (retval > 0 && !is_sync_kiocb(iocb))
				retval = -EIOCBQUEUED;
			if (retval > 0)
				*ppos = pos + retval;
		}
		file_accessed(filp);
		goto out;
	}

	retval = 0;
	if (count) {
		for (seg = 0; seg < nr_segs; seg++) {
			read_descriptor_t desc;

			desc.written = 0;
			desc.arg.buf = iov[seg].iov_base;
			desc.count = iov[seg].iov_len;
			if (desc.count == 0)
				continue;
			desc.error = 0;
			do_generic_file_read(filp,ppos,&desc,file_read_actor);
			retval += desc.written;
			if (desc.error) {
				retval = retval ?: desc.error;
				break;
			}
		}
	}
out:
	return retval;
}

EXPORT_SYMBOL(__generic_file_aio_read);

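/*
 * aio read entry point for a single user buffer: wrap it in a one-element
 * iovec and hand it to __generic_file_aio_read() at iocb->ki_pos.
 */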
ssize_t
generic_file_aio_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };

	BUG_ON(iocb->ki_pos != pos);
	return __generic_file_aio_read(iocb, &local_iov, 1, &iocb->ki_pos);
}

EXPORT_SYMBOL(generic_file_aio_read);

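/*
 * Synchronous read(): run the aio read path on an on-stack kiocb and wait
 * for completion if the I/O was queued.
 */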
ssize_t
generic_file_read(struct file *filp, char __user *buf, size_t count, loff_t *ppos)
{
	struct iovec local_iov = { .iov_base = buf, .iov_len = count };
	struct kiocb kiocb;
	ssize_t ret;

	init_sync_kiocb(&kiocb, filp);
	ret = __generic_file_aio_read(&kiocb, &local_iov, 1, ppos);
	if (-EIOCBQUEUED == ret)
		ret = wait_on_sync_kiocb(&kiocb);
	return ret;
}

EXPORT_SYMBOL(generic_file_read);

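/*
 * read_actor used by generic_file_sendfile(): push the page to the target
 * file's ->sendpage() instead of copying it to user space.
 */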
int file_send_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size)
{
	ssize_t written;
	unsigned long count = desc->count;
	struct file *file = desc->arg.data;

	if (size > count)
		size = count;

	written = file->f_op->sendpage(file, page, offset,
					size, &file->f_pos, size<count);
	if (written < 0) {
		desc->error = written;
		written = 0;
	}
	desc->count = count - written;
	desc->written += written;
	return written;
}

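/*
 * sendfile(): read 'count' bytes from 'in_file' starting at *ppos, feeding
 * each page-cache page to 'actor' (typically file_send_actor) with 'target'
 * carried in the read descriptor.
 */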
ssize_t generic_file_sendfile(struct file *in_file, loff_t *ppos,
			 size_t count, read_actor_t actor, void *target)
{
	read_descriptor_t desc;

	if (!count)
		return 0;

	desc.written = 0;
	desc.count = count;
	desc.arg.data = target;
	desc.error = 0;

	do_generic_file_read(in_file, ppos, &desc, actor);
	if (desc.written)
		return desc.written;
	return desc.error;
}

EXPORT_SYMBOL(generic_file_sendfile);

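/*
 * Force readahead of 'nr' pages starting at 'index', capped by
 * max_sane_readahead(); used by sys_readahead() below.
 */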
static ssize_t
do_readahead(struct address_space *mapping, struct file *filp,
	     unsigned long index, unsigned long nr)
{
	if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
		return -EINVAL;

	force_page_cache_readahead(mapping, filp, index,
					max_sane_readahead(nr));
	return 0;
}

asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
{
	ssize_t ret;
	struct file *file;

	ret = -EBADF;
	file = fget(fd);
	if (file) {
		if (file->f_mode & FMODE_READ) {
			struct address_space *mapping = file->f_mapping;
			unsigned long start = offset >> PAGE_CACHE_SHIFT;
			unsigned long end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
			unsigned long len = end - start + 1;
			ret = do_readahead(mapping, file, start, len);
		}
		fput(file);
	}
	return ret;
}

#ifdef CONFIG_MMU
/*
 * This adds the requested page to the page cache if it isn't already there,
 * and schedules an I/O to read in its contents from disk.
 */
static int FASTCALL(page_cache_read(struct file * file, unsigned long offset));
static int fastcall page_cache_read(struct file * file, unsigned long offset)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int ret;

	do {
		page = page_cache_alloc_cold(mapping);
		if (!page)
			return -ENOMEM;

		ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
		if (ret == 0)
			ret = mapping->a_ops->readpage(file, page);
		else if (ret == -EEXIST)
			ret = 0; /* losing race to add is OK */

		page_cache_release(page);

	} while (ret == AOP_TRUNCATED_PAGE);

	return ret;
}

#define MMAP_LOTSAMISS  (100)

/*
 * filemap_nopage() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * The goto's are kind of ugly, but this streamlines the normal case of having
 * it in the page cache, and handles the special cases reasonably without
 * having a lot of duplicated code.
 */
struct page *filemap_nopage(struct vm_area_struct *area,
				unsigned long address, int *type)
{
	int error;
	struct file *file = area->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct file_ra_state *ra = &file->f_ra;
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long size, pgoff;
	int did_readaround = 0, majmin = VM_FAULT_MINOR;

	pgoff = ((address-area->vm_start) >> PAGE_CACHE_SHIFT) + area->vm_pgoff;

retry_all:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff >= size)
		goto outside_data_content;

	/* If we don't want any read-ahead, don't bother */
	if (VM_RandomReadHint(area))
		goto no_cached_page;

	/*
	 * The readahead code wants to be told about each and every page
	 * so it can build and shrink its windows appropriately
	 *
	 * For sequential accesses, we use the generic readahead logic.
	 */
	if (VM_SequentialReadHint(area))
		page_cache_readahead(mapping, ra, file, pgoff, 1);

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		unsigned long ra_pages;

		if (VM_SequentialReadHint(area)) {
			handle_ra_miss(mapping, ra, pgoff);
			goto no_cached_page;
		}
		ra->mmap_miss++;

		/*
		 * Do we miss much more than hit in this file? If so,
		 * stop bothering with read-ahead. It will only hurt.
		 */
		if (ra->mmap_miss > ra->mmap_hit + MMAP_LOTSAMISS)
			goto no_cached_page;

		/*
		 * To keep the pgmajfault counter straight, we need to
		 * check did_readaround, as this is an inner loop.
		 */
		if (!did_readaround) {
			majmin = VM_FAULT_MAJOR;
			inc_page_state(pgmajfault);
		}
		did_readaround = 1;
		ra_pages = max_sane_readahead(file->f_ra.ra_pages);
		if (ra_pages) {
			pgoff_t start = 0;

			if (pgoff > ra_pages / 2)
				start = pgoff - ra_pages / 2;
			do_page_cache_readahead(mapping, file, start, ra_pages);
		}
		page = find_get_page(mapping, pgoff);
		if (!page)
			goto no_cached_page;
	}

	if (!did_readaround)
		ra->mmap_hit++;

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page))
		goto page_not_uptodate;

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	if (type)
		*type = majmin;
	return page;

outside_data_content:
	/*
	 * An external ptracer can access pages that normally aren't
	 * accessible..
	 */
	if (area->vm_mm == current->mm)
		return NULL;
	/* Fall through to the non-read-ahead case */
no_cached_page:
	/*
	 * We're only likely to ever get here if MADV_RANDOM is in
	 * effect.
	 */
	error = page_cache_read(file, pgoff);
	grab_swap_token();

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	if (error == -ENOMEM)
		return NOPAGE_OOM;
	return NULL;

page_not_uptodate:
	if (!did_readaround) {
		majmin = VM_FAULT_MAJOR;
		inc_page_state(pgmajfault);
	}
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		page_cache_release(page);
		goto retry_all;
	}

	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}
	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
	page_cache_release(page);
	return NULL;
}

EXPORT_SYMBOL(filemap_nopage);

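/*
 * Look up the page at 'pgoff' for filemap_populate(), reading it in if
 * needed.  With 'nonblock' set, return NULL instead of waiting for the page
 * to be read.
 */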
static struct page * filemap_getpage(struct file *file, unsigned long pgoff,
					int nonblock)
{
	struct address_space *mapping = file->f_mapping;
	struct page *page;
	int error;

	/*
	 * Do we have something in the page cache already?
	 */
retry_find:
	page = find_get_page(mapping, pgoff);
	if (!page) {
		if (nonblock)
			return NULL;
		goto no_cached_page;
	}

	/*
	 * Ok, found a page in the page cache, now we need to check
	 * that it's up-to-date.
	 */
	if (!PageUptodate(page)) {
		if (nonblock) {
			page_cache_release(page);
			return NULL;
		}
		goto page_not_uptodate;
	}

success:
	/*
	 * Found the page and have a reference on it.
	 */
	mark_page_accessed(page);
	return page;

no_cached_page:
	error = page_cache_read(file, pgoff);

	/*
	 * The page we want has now been added to the page cache.
	 * In the unlikely event that someone removed it in the
	 * meantime, we'll just come back here and read it again.
	 */
	if (error >= 0)
		goto retry_find;

	/*
	 * An error return from page_cache_read can result if the
	 * system is low on memory, or a problem occurs while trying
	 * to schedule I/O.
	 */
	return NULL;

page_not_uptodate:
	lock_page(page);

	/* Did it get unhashed while we waited for it? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}

	/* Did somebody else get it up-to-date? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	 * because there really aren't any performance issues here
	 * and we need to check for errors.
	 */
	lock_page(page);

	/* Somebody truncated the page on us? */
	if (!page->mapping) {
		unlock_page(page);
		goto err;
	}
	/* Somebody else successfully read it in? */
	if (PageUptodate(page)) {
		unlock_page(page);
		goto success;
	}

	ClearPageError(page);
	error = mapping->a_ops->readpage(file, page);
	if (!error) {
		wait_on_page_locked(page);
		if (PageUptodate(page))
			goto success;
	} else if (error == AOP_TRUNCATED_PAGE) {
		page_cache_release(page);
		goto retry_find;
	}

	/*
	 * Things didn't work out. Return zero to tell the
	 * mm layer so, possibly freeing the page cache page first.
	 */
err:
	page_cache_release(page);

	return NULL;
}

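/*
 * Populate a file-backed (possibly nonlinear) mapping: for each page in the
 * range, install the page cache page, or install just a file PTE for
 * VM_NONLINEAR vmas when the page cannot be read in right now.
 */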
int filemap_populate(struct vm_area_struct *vma, unsigned long addr,
		unsigned long len, pgprot_t prot, unsigned long pgoff,
		int nonblock)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned long size;
	struct mm_struct *mm = vma->vm_mm;
	struct page *page;
	int err;

	if (!nonblock)
		force_page_cache_readahead(mapping, vma->vm_file,
					pgoff, len >> PAGE_CACHE_SHIFT);

repeat:
	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (pgoff + (len >> PAGE_CACHE_SHIFT) > size)
		return -EINVAL;

	page = filemap_getpage(file, pgoff, nonblock);

	/* XXX: This is wrong, a filesystem I/O error may have happened. Fix that as
	 * done in shmem_populate calling shmem_getpage */
	if (!page && !nonblock)
		return -ENOMEM;

	if (page) {
		err = install_page(mm, vma, addr, page, prot);
		if (err) {
			page_cache_release(page);
			return err;
		}
	} else if (vma->vm_flags & VM_NONLINEAR) {
		/* No page was found just because we can't read it in now (being
		 * here implies nonblock != 0), but the page may exist, so set
		 * the PTE to fault it in later. */
		err = install_file_pte(mm, vma, addr, pgoff, prot);
		if (err)
			return err;
	}

	len -= PAGE_SIZE;
	addr += PAGE_SIZE;
	pgoff++;
	if (len)
		goto repeat;

	return 0;
}
EXPORT_SYMBOL(filemap_populate);

struct vm_operations_struct generic_file_vm_ops = {
	.nopage		= filemap_nopage,
	.populate	= filemap_populate,
};

/* This is used for a general mmap of a disk file */

int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;
	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}

/*
 * This is for filesystems which do not implement ->writepage.
 */
int generic_file_readonly_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE))
		return -EINVAL;
	return generic_file_mmap(file, vma);
}
#else
int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
int generic_file_readonly_mmap(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

EXPORT_SYMBOL(generic_file_mmap);
EXPORT_SYMBOL(generic_file_readonly_mmap);

1617 | static inline struct page *__read_cache_page(struct address_space *mapping, | |
1618 | unsigned long index, | |
1619 | int (*filler)(void *,struct page*), | |
1620 | void *data) | |
1621 | { | |
1622 | struct page *page, *cached_page = NULL; | |
1623 | int err; | |
1624 | repeat: | |
1625 | page = find_get_page(mapping, index); | |
1626 | if (!page) { | |
1627 | if (!cached_page) { | |
1628 | cached_page = page_cache_alloc_cold(mapping); | |
1629 | if (!cached_page) | |
1630 | return ERR_PTR(-ENOMEM); | |
1631 | } | |
1632 | err = add_to_page_cache_lru(cached_page, mapping, | |
1633 | index, GFP_KERNEL); | |
1634 | if (err == -EEXIST) | |
1635 | goto repeat; | |
1636 | if (err < 0) { | |
1637 | /* Presumably ENOMEM for radix tree node */ | |
1638 | page_cache_release(cached_page); | |
1639 | return ERR_PTR(err); | |
1640 | } | |
1641 | page = cached_page; | |
1642 | cached_page = NULL; | |
1643 | err = filler(data, page); | |
1644 | if (err < 0) { | |
1645 | page_cache_release(page); | |
1646 | page = ERR_PTR(err); | |
1647 | } | |
1648 | } | |
1649 | if (cached_page) | |
1650 | page_cache_release(cached_page); | |
1651 | return page; | |
1652 | } | |
1653 | ||
1654 | /* | |
1655 | * Read into the page cache. If a page already exists, | |
1656 | * and PageUptodate() is not set, try to fill the page. | |
1657 | */ | |
1658 | struct page *read_cache_page(struct address_space *mapping, | |
1659 | unsigned long index, | |
1660 | int (*filler)(void *,struct page*), | |
1661 | void *data) | |
1662 | { | |
1663 | struct page *page; | |
1664 | int err; | |
1665 | ||
1666 | retry: | |
1667 | page = __read_cache_page(mapping, index, filler, data); | |
1668 | if (IS_ERR(page)) | |
1669 | goto out; | |
1670 | mark_page_accessed(page); | |
1671 | if (PageUptodate(page)) | |
1672 | goto out; | |
1673 | ||
1674 | lock_page(page); | |
1675 | if (!page->mapping) { | |
1676 | unlock_page(page); | |
1677 | page_cache_release(page); | |
1678 | goto retry; | |
1679 | } | |
1680 | if (PageUptodate(page)) { | |
1681 | unlock_page(page); | |
1682 | goto out; | |
1683 | } | |
1684 | err = filler(data, page); | |
1685 | if (err < 0) { | |
1686 | page_cache_release(page); | |
1687 | page = ERR_PTR(err); | |
1688 | } | |
1689 | out: | |
1690 | return page; | |
1691 | } | |
1692 | ||
1693 | EXPORT_SYMBOL(read_cache_page); | |
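/*
 * Usage sketch (illustrative only; example_get_page is a made-up name):
 * callers typically pass the mapping's own ->readpage as the filler and
 * then wait for the read to complete before using the page.
 */
#if 0
static struct page *example_get_page(struct address_space *mapping,
				     unsigned long n)
{
	struct page *page = read_cache_page(mapping, n,
				(filler_t *)mapping->a_ops->readpage, NULL);

	if (!IS_ERR(page)) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			page_cache_release(page);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}
#endif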
1694 | ||
1695 | /* | |
1696 | * If the page was newly created, increment its refcount and add it to the | |
1697 | * caller's lru-buffering pagevec. This function is specifically for | |
1698 | * generic_file_write(). | |
1699 | */ | |
1700 | static inline struct page * | |
1701 | __grab_cache_page(struct address_space *mapping, unsigned long index, | |
1702 | struct page **cached_page, struct pagevec *lru_pvec) | |
1703 | { | |
1704 | int err; | |
1705 | struct page *page; | |
1706 | repeat: | |
1707 | page = find_lock_page(mapping, index); | |
1708 | if (!page) { | |
1709 | if (!*cached_page) { | |
1710 | *cached_page = page_cache_alloc(mapping); | |
1711 | if (!*cached_page) | |
1712 | return NULL; | |
1713 | } | |
1714 | err = add_to_page_cache(*cached_page, mapping, | |
1715 | index, GFP_KERNEL); | |
1716 | if (err == -EEXIST) | |
1717 | goto repeat; | |
1718 | if (err == 0) { | |
1719 | page = *cached_page; | |
1720 | page_cache_get(page); | |
1721 | if (!pagevec_add(lru_pvec, page)) | |
1722 | __pagevec_lru_add(lru_pvec); | |
1723 | *cached_page = NULL; | |
1724 | } | |
1725 | } | |
1726 | return page; | |
1727 | } | |
1728 | ||
1729 | /* | |
1730 | * The logic we want is | |
1731 | * | |
1732 | * if suid or (sgid and xgrp) | |
1733 | * remove privs | |
1734 | */ | |
1735 | int remove_suid(struct dentry *dentry) | |
1736 | { | |
1737 | mode_t mode = dentry->d_inode->i_mode; | |
1738 | int kill = 0; | |
1739 | int result = 0; | |
1740 | ||
1741 | /* suid always must be killed */ | |
1742 | if (unlikely(mode & S_ISUID)) | |
1743 | kill = ATTR_KILL_SUID; | |
1744 | ||
1745 | /* | |
1746 | * sgid without any exec bits is just a mandatory locking mark; leave | |
1747 | * it alone. If some exec bits are set, it's a real sgid; kill it. | |
1748 | */ | |
1749 | if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) | |
1750 | kill |= ATTR_KILL_SGID; | |
1751 | ||
1752 | if (unlikely(kill && !capable(CAP_FSETID))) { | |
1753 | struct iattr newattrs; | |
1754 | ||
1755 | newattrs.ia_valid = ATTR_FORCE | kill; | |
1756 | result = notify_change(dentry, &newattrs); | |
1757 | } | |
1758 | return result; | |
1759 | } | |
1760 | EXPORT_SYMBOL(remove_suid); | |
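/*
 * Worked example (illustrative): for a caller without CAP_FSETID,
 *
 *	mode 04755 (setuid)			-> kill ATTR_KILL_SUID
 *	mode 02755 (setgid + group exec)	-> kill ATTR_KILL_SGID
 *	mode 02644 (setgid, no group exec)	-> left alone; this is the
 *						   mandatory-locking marker
 */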
1761 | ||
ceffc078 | 1762 | size_t |
1da177e4 LT |
1763 | __filemap_copy_from_user_iovec(char *vaddr, |
1764 | const struct iovec *iov, size_t base, size_t bytes) | |
1765 | { | |
1766 | size_t copied = 0, left = 0; | |
1767 | ||
1768 | while (bytes) { | |
1769 | char __user *buf = iov->iov_base + base; | |
1770 | int copy = min(bytes, iov->iov_len - base); | |
1771 | ||
1772 | base = 0; | |
1773 | left = __copy_from_user_inatomic(vaddr, buf, copy); | |
1774 | copied += copy; | |
1775 | bytes -= copy; | |
1776 | vaddr += copy; | |
1777 | iov++; | |
1778 | ||
1779 | if (unlikely(left)) { | |
1780 | /* zero the rest of the target like __copy_from_user */ | |
1781 | if (bytes) | |
1782 | memset(vaddr, 0, bytes); | |
1783 | break; | |
1784 | } | |
1785 | } | |
1786 | return copied - left; | |
1787 | } | |
1788 | ||
1da177e4 LT |
1789 | /* |
1790 | * Performs necessary checks before doing a write. | |
1791 | * | |
1792 | * Can adjust the writing position or the number of bytes to write. | |
1793 | * Returns an appropriate error code that the caller should return, or | |
1794 | * zero if the write should be allowed. | |
1795 | */ | |
1796 | inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, int isblk) | |
1797 | { | |
1798 | struct inode *inode = file->f_mapping->host; | |
1799 | unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; | |
1800 | ||
1801 | if (unlikely(*pos < 0)) | |
1802 | return -EINVAL; | |
1803 | ||
1da177e4 LT |
1804 | if (!isblk) { |
1805 | /* FIXME: this is for backwards compatibility with 2.4 */ | |
1806 | if (file->f_flags & O_APPEND) | |
1807 | *pos = i_size_read(inode); | |
1808 | ||
1809 | if (limit != RLIM_INFINITY) { | |
1810 | if (*pos >= limit) { | |
1811 | send_sig(SIGXFSZ, current, 0); | |
1812 | return -EFBIG; | |
1813 | } | |
1814 | if (*count > limit - (typeof(limit))*pos) { | |
1815 | *count = limit - (typeof(limit))*pos; | |
1816 | } | |
1817 | } | |
1818 | } | |
1819 | ||
1820 | /* | |
1821 | * LFS rule: without O_LARGEFILE the file offset is limited to MAX_NON_LFS. | |
1822 | */ | |
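	/*
	 * MAX_NON_LFS is ((1UL << 31) - 1), i.e. 0x7fffffff.  For example,
	 * without O_LARGEFILE a write of 0x20000 bytes at *pos == 0x7fff0000
	 * is trimmed to 0xffff bytes (a short write ending at 0x7fffffff),
	 * while a write starting at or beyond 0x7fffffff gets SIGXFSZ and
	 * -EFBIG.
	 */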
1823 | if (unlikely(*pos + *count > MAX_NON_LFS && | |
1824 | !(file->f_flags & O_LARGEFILE))) { | |
1825 | if (*pos >= MAX_NON_LFS) { | |
1826 | send_sig(SIGXFSZ, current, 0); | |
1827 | return -EFBIG; | |
1828 | } | |
1829 | if (*count > MAX_NON_LFS - (unsigned long)*pos) { | |
1830 | *count = MAX_NON_LFS - (unsigned long)*pos; | |
1831 | } | |
1832 | } | |
1833 | ||
1834 | /* | |
1835 | * Are we about to exceed the fs block limit? | |
1836 | * | |
1837 | * If we have already written some data it becomes a short write. If we | |
1838 | * have exceeded the limit without writing any data, we send a signal and | |
1839 | * return -EFBIG. Linus' frestrict idea will clean these up nicely. | |
1840 | */ | |
1841 | if (likely(!isblk)) { | |
1842 | if (unlikely(*pos >= inode->i_sb->s_maxbytes)) { | |
1843 | if (*count || *pos > inode->i_sb->s_maxbytes) { | |
1844 | send_sig(SIGXFSZ, current, 0); | |
1845 | return -EFBIG; | |
1846 | } | |
1847 | /* zero-length writes at ->s_maxbytes are OK */ | |
1848 | } | |
1849 | ||
1850 | if (unlikely(*pos + *count > inode->i_sb->s_maxbytes)) | |
1851 | *count = inode->i_sb->s_maxbytes - *pos; | |
1852 | } else { | |
1853 | loff_t isize; | |
1854 | if (bdev_read_only(I_BDEV(inode))) | |
1855 | return -EPERM; | |
1856 | isize = i_size_read(inode); | |
1857 | if (*pos >= isize) { | |
1858 | if (*count || *pos > isize) | |
1859 | return -ENOSPC; | |
1860 | } | |
1861 | ||
1862 | if (*pos + *count > isize) | |
1863 | *count = isize - *pos; | |
1864 | } | |
1865 | return 0; | |
1866 | } | |
1867 | EXPORT_SYMBOL(generic_write_checks); | |
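/*
 * Usage sketch (illustrative only; examplefs_write is a made-up name):
 * a ->write implementation clamps pos/count with generic_write_checks()
 * before touching the page cache, exactly as the generic write paths
 * further down do.
 */
#if 0
static ssize_t examplefs_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *ppos)
{
	struct inode *inode = file->f_mapping->host;
	loff_t pos = *ppos;
	int err;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		return err;
	if (count == 0)
		return 0;
	/* ... write 'count' bytes at 'pos', then update *ppos ... */
	return count;
}
#endif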
1868 | ||
1869 | ssize_t | |
1870 | generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov, | |
1871 | unsigned long *nr_segs, loff_t pos, loff_t *ppos, | |
1872 | size_t count, size_t ocount) | |
1873 | { | |
1874 | struct file *file = iocb->ki_filp; | |
1875 | struct address_space *mapping = file->f_mapping; | |
1876 | struct inode *inode = mapping->host; | |
1877 | ssize_t written; | |
1878 | ||
1879 | if (count != ocount) | |
1880 | *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count); | |
1881 | ||
1882 | written = generic_file_direct_IO(WRITE, iocb, iov, pos, *nr_segs); | |
1883 | if (written > 0) { | |
1884 | loff_t end = pos + written; | |
1885 | if (end > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { | |
1886 | i_size_write(inode, end); | |
1887 | mark_inode_dirty(inode); | |
1888 | } | |
1889 | *ppos = end; | |
1890 | } | |
1891 | ||
1892 | /* | |
1893 | * Sync the fs metadata but not the minor inode changes, and | |
1894 | * of course not the data, as we did direct DMA for the I/O. | |
1b1dcc1b | 1895 | * i_mutex is held, which protects generic_osync_inode() from |
1da177e4 LT |
1896 | * livelocking. |
1897 | */ | |
1e8a81c5 HH |
1898 | if (written >= 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { |
1899 | int err = generic_osync_inode(inode, mapping, OSYNC_METADATA); | |
1900 | if (err < 0) | |
1901 | written = err; | |
1902 | } | |
1da177e4 LT |
1903 | if (written == count && !is_sync_kiocb(iocb)) |
1904 | written = -EIOCBQUEUED; | |
1905 | return written; | |
1906 | } | |
1907 | EXPORT_SYMBOL(generic_file_direct_write); | |
1908 | ||
1909 | ssize_t | |
1910 | generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov, | |
1911 | unsigned long nr_segs, loff_t pos, loff_t *ppos, | |
1912 | size_t count, ssize_t written) | |
1913 | { | |
1914 | struct file *file = iocb->ki_filp; | |
1915 | struct address_space * mapping = file->f_mapping; | |
1916 | struct address_space_operations *a_ops = mapping->a_ops; | |
1917 | struct inode *inode = mapping->host; | |
1918 | long status = 0; | |
1919 | struct page *page; | |
1920 | struct page *cached_page = NULL; | |
1921 | size_t bytes; | |
1922 | struct pagevec lru_pvec; | |
1923 | const struct iovec *cur_iov = iov; /* current iovec */ | |
1924 | size_t iov_base = 0; /* offset in the current iovec */ | |
1925 | char __user *buf; | |
1926 | ||
1927 | pagevec_init(&lru_pvec, 0); | |
1928 | ||
1929 | /* | |
1930 | * Handle a partial DIO write. Adjust cur_iov if needed. | |
1931 | */ | |
1932 | if (likely(nr_segs == 1)) | |
1933 | buf = iov->iov_base + written; | |
1934 | else { | |
1935 | filemap_set_next_iovec(&cur_iov, &iov_base, written); | |
f021e921 | 1936 | buf = cur_iov->iov_base + iov_base; |
1da177e4 LT |
1937 | } |
1938 | ||
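	/*
	 * Walk the write one page-cache page at a time: grab (or create)
	 * the page, let the filesystem prepare it, copy the user data in,
	 * commit it, then throttle against the dirty-page limits before
	 * moving on to the next page.
	 */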
1939 | do { | |
1940 | unsigned long index; | |
1941 | unsigned long offset; | |
a5117181 | 1942 | unsigned long maxlen; |
1da177e4 LT |
1943 | size_t copied; |
1944 | ||
1945 | offset = (pos & (PAGE_CACHE_SIZE -1)); /* Within page */ | |
1946 | index = pos >> PAGE_CACHE_SHIFT; | |
1947 | bytes = PAGE_CACHE_SIZE - offset; | |
1948 | if (bytes > count) | |
1949 | bytes = count; | |
1950 | ||
1951 | /* | |
1952 | * Bring in the user page that we will copy from _first_. | |
1953 | * Otherwise there's a nasty deadlock on copying from the | |
1954 | * same page as we're writing to, without it being marked | |
1955 | * up-to-date. | |
1956 | */ | |
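		/*
		 * Concretely: if the source buffer is an mmap of the very
		 * page being written and that page is not yet up to date,
		 * prepare_write() below would lock the page and the copy
		 * would then fault on it, with the fault handler waiting
		 * for the same page lock.  Touching the source pages first,
		 * while nothing is locked, avoids that.
		 */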
a5117181 MS |
1957 | maxlen = cur_iov->iov_len - iov_base; |
1958 | if (maxlen > bytes) | |
1959 | maxlen = bytes; | |
1960 | fault_in_pages_readable(buf, maxlen); | |
1da177e4 LT |
1961 | |
1962 | page = __grab_cache_page(mapping,index,&cached_page,&lru_pvec); | |
1963 | if (!page) { | |
1964 | status = -ENOMEM; | |
1965 | break; | |
1966 | } | |
1967 | ||
1968 | status = a_ops->prepare_write(file, page, offset, offset+bytes); | |
1969 | if (unlikely(status)) { | |
1970 | loff_t isize = i_size_read(inode); | |
994fc28c ZB |
1971 | |
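			/*
			 * AOP_TRUNCATED_PAGE: prepare_write() found the page
			 * truncated under us and has already unlocked it, so
			 * just drop our reference and retry with a fresh page.
			 */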
1972 | if (status != AOP_TRUNCATED_PAGE) | |
1973 | unlock_page(page); | |
1974 | page_cache_release(page); | |
1975 | if (status == AOP_TRUNCATED_PAGE) | |
1976 | continue; | |
1da177e4 LT |
1977 | /* |
1978 | * prepare_write() may have instantiated a few blocks | |
1979 | * outside i_size. Trim these off again. | |
1980 | */ | |
1da177e4 LT |
1981 | if (pos + bytes > isize) |
1982 | vmtruncate(inode, isize); | |
1983 | break; | |
1984 | } | |
1985 | if (likely(nr_segs == 1)) | |
1986 | copied = filemap_copy_from_user(page, offset, | |
1987 | buf, bytes); | |
1988 | else | |
1989 | copied = filemap_copy_from_user_iovec(page, offset, | |
1990 | cur_iov, iov_base, bytes); | |
1991 | flush_dcache_page(page); | |
1992 | status = a_ops->commit_write(file, page, offset, offset+bytes); | |
994fc28c ZB |
1993 | if (status == AOP_TRUNCATED_PAGE) { |
1994 | page_cache_release(page); | |
1995 | continue; | |
1996 | } | |
1da177e4 LT |
1997 | if (likely(copied > 0)) { |
1998 | if (!status) | |
1999 | status = copied; | |
2000 | ||
2001 | if (status >= 0) { | |
2002 | written += status; | |
2003 | count -= status; | |
2004 | pos += status; | |
2005 | buf += status; | |
f021e921 | 2006 | if (unlikely(nr_segs > 1)) { |
1da177e4 LT |
2007 | filemap_set_next_iovec(&cur_iov, |
2008 | &iov_base, status); | |
b0cfbd99 BP |
2009 | if (count) |
2010 | buf = cur_iov->iov_base + | |
2011 | iov_base; | |
a5117181 MS |
2012 | } else { |
2013 | iov_base += status; | |
f021e921 | 2014 | } |
1da177e4 LT |
2015 | } |
2016 | } | |
2017 | if (unlikely(copied != bytes)) | |
2018 | if (status >= 0) | |
2019 | status = -EFAULT; | |
2020 | unlock_page(page); | |
2021 | mark_page_accessed(page); | |
2022 | page_cache_release(page); | |
2023 | if (status < 0) | |
2024 | break; | |
2025 | balance_dirty_pages_ratelimited(mapping); | |
2026 | cond_resched(); | |
2027 | } while (count); | |
2028 | *ppos = pos; | |
2029 | ||
2030 | if (cached_page) | |
2031 | page_cache_release(cached_page); | |
2032 | ||
2033 | /* | |
2034 | * For now, when the user asks for O_SYNC, we'll actually give O_DSYNC | |
2035 | */ | |
2036 | if (likely(status >= 0)) { | |
2037 | if (unlikely((file->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
2038 | if (!a_ops->writepage || !is_sync_kiocb(iocb)) | |
2039 | status = generic_osync_inode(inode, mapping, | |
2040 | OSYNC_METADATA|OSYNC_DATA); | |
2041 | } | |
2042 | } | |
2043 | ||
2044 | /* | |
2045 | * If we get here for O_DIRECT writes then we must have fallen through | |
2046 | * to buffered writes (block instantiation inside i_size). So we sync | |
2047 | * the file data here, to try to honour O_DIRECT expectations. | |
2048 | */ | |
2049 | if (unlikely(file->f_flags & O_DIRECT) && written) | |
2050 | status = filemap_write_and_wait(mapping); | |
2051 | ||
2052 | pagevec_lru_add(&lru_pvec); | |
2053 | return written ? written : status; | |
2054 | } | |
2055 | EXPORT_SYMBOL(generic_file_buffered_write); | |
2056 | ||
5ce7852c | 2057 | static ssize_t |
1da177e4 LT |
2058 | __generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, |
2059 | unsigned long nr_segs, loff_t *ppos) | |
2060 | { | |
2061 | struct file *file = iocb->ki_filp; | |
2062 | struct address_space * mapping = file->f_mapping; | |
2063 | size_t ocount; /* original count */ | |
2064 | size_t count; /* after file limit checks */ | |
2065 | struct inode *inode = mapping->host; | |
2066 | unsigned long seg; | |
2067 | loff_t pos; | |
2068 | ssize_t written; | |
2069 | ssize_t err; | |
2070 | ||
2071 | ocount = 0; | |
2072 | for (seg = 0; seg < nr_segs; seg++) { | |
2073 | const struct iovec *iv = &iov[seg]; | |
2074 | ||
2075 | /* | |
2076 | * If any segment has a negative length, or the cumulative | |
2077 | * length ever wraps negative then return -EINVAL. | |
2078 | */ | |
2079 | ocount += iv->iov_len; | |
2080 | if (unlikely((ssize_t)(ocount|iv->iov_len) < 0)) | |
2081 | return -EINVAL; | |
2082 | if (access_ok(VERIFY_READ, iv->iov_base, iv->iov_len)) | |
2083 | continue; | |
2084 | if (seg == 0) | |
2085 | return -EFAULT; | |
2086 | nr_segs = seg; | |
2087 | ocount -= iv->iov_len; /* This segment is no good */ | |
2088 | break; | |
2089 | } | |
2090 | ||
2091 | count = ocount; | |
2092 | pos = *ppos; | |
2093 | ||
2094 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | |
2095 | ||
2096 | /* We can write back this queue in page reclaim */ | |
2097 | current->backing_dev_info = mapping->backing_dev_info; | |
2098 | written = 0; | |
2099 | ||
2100 | err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode)); | |
2101 | if (err) | |
2102 | goto out; | |
2103 | ||
2104 | if (count == 0) | |
2105 | goto out; | |
2106 | ||
2107 | err = remove_suid(file->f_dentry); | |
2108 | if (err) | |
2109 | goto out; | |
2110 | ||
870f4817 | 2111 | file_update_time(file); |
1da177e4 LT |
2112 | |
2113 | /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ | |
2114 | if (unlikely(file->f_flags & O_DIRECT)) { | |
2115 | written = generic_file_direct_write(iocb, iov, | |
2116 | &nr_segs, pos, ppos, count, ocount); | |
2117 | if (written < 0 || written == count) | |
2118 | goto out; | |
2119 | /* | |
2120 | * direct-io write to a hole: fall through to buffered I/O | |
2121 | * for completing the rest of the request. | |
2122 | */ | |
2123 | pos += written; | |
2124 | count -= written; | |
2125 | } | |
2126 | ||
2127 | written = generic_file_buffered_write(iocb, iov, nr_segs, | |
2128 | pos, ppos, count, written); | |
2129 | out: | |
2130 | current->backing_dev_info = NULL; | |
2131 | return written ? written : err; | |
2132 | } | |
2133 | EXPORT_SYMBOL(generic_file_aio_write_nolock); | |
2134 | ||
2135 | ssize_t | |
2136 | generic_file_aio_write_nolock(struct kiocb *iocb, const struct iovec *iov, | |
2137 | unsigned long nr_segs, loff_t *ppos) | |
2138 | { | |
2139 | struct file *file = iocb->ki_filp; | |
2140 | struct address_space *mapping = file->f_mapping; | |
2141 | struct inode *inode = mapping->host; | |
2142 | ssize_t ret; | |
2143 | loff_t pos = *ppos; | |
2144 | ||
2145 | ret = __generic_file_aio_write_nolock(iocb, iov, nr_segs, ppos); | |
2146 | ||
2147 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
2148 | int err; | |
2149 | ||
2150 | err = sync_page_range_nolock(inode, mapping, pos, ret); | |
2151 | if (err < 0) | |
2152 | ret = err; | |
2153 | } | |
2154 | return ret; | |
2155 | } | |
2156 | ||
5ce7852c | 2157 | static ssize_t |
1da177e4 LT |
2158 | __generic_file_write_nolock(struct file *file, const struct iovec *iov, |
2159 | unsigned long nr_segs, loff_t *ppos) | |
2160 | { | |
2161 | struct kiocb kiocb; | |
2162 | ssize_t ret; | |
2163 | ||
2164 | init_sync_kiocb(&kiocb, file); | |
2165 | ret = __generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); | |
2166 | if (ret == -EIOCBQUEUED) | |
2167 | ret = wait_on_sync_kiocb(&kiocb); | |
2168 | return ret; | |
2169 | } | |
2170 | ||
2171 | ssize_t | |
2172 | generic_file_write_nolock(struct file *file, const struct iovec *iov, | |
2173 | unsigned long nr_segs, loff_t *ppos) | |
2174 | { | |
2175 | struct kiocb kiocb; | |
2176 | ssize_t ret; | |
2177 | ||
2178 | init_sync_kiocb(&kiocb, file); | |
2179 | ret = generic_file_aio_write_nolock(&kiocb, iov, nr_segs, ppos); | |
2180 | if (-EIOCBQUEUED == ret) | |
2181 | ret = wait_on_sync_kiocb(&kiocb); | |
2182 | return ret; | |
2183 | } | |
2184 | EXPORT_SYMBOL(generic_file_write_nolock); | |
2185 | ||
2186 | ssize_t generic_file_aio_write(struct kiocb *iocb, const char __user *buf, | |
2187 | size_t count, loff_t pos) | |
2188 | { | |
2189 | struct file *file = iocb->ki_filp; | |
2190 | struct address_space *mapping = file->f_mapping; | |
2191 | struct inode *inode = mapping->host; | |
2192 | ssize_t ret; | |
2193 | struct iovec local_iov = { .iov_base = (void __user *)buf, | |
2194 | .iov_len = count }; | |
2195 | ||
2196 | BUG_ON(iocb->ki_pos != pos); | |
2197 | ||
1b1dcc1b | 2198 | mutex_lock(&inode->i_mutex); |
1da177e4 LT |
2199 | ret = __generic_file_aio_write_nolock(iocb, &local_iov, 1, |
2200 | &iocb->ki_pos); | |
1b1dcc1b | 2201 | mutex_unlock(&inode->i_mutex); |
1da177e4 LT |
2202 | |
2203 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
2204 | ssize_t err; | |
2205 | ||
2206 | err = sync_page_range(inode, mapping, pos, ret); | |
2207 | if (err < 0) | |
2208 | ret = err; | |
2209 | } | |
2210 | return ret; | |
2211 | } | |
2212 | EXPORT_SYMBOL(generic_file_aio_write); | |
2213 | ||
2214 | ssize_t generic_file_write(struct file *file, const char __user *buf, | |
2215 | size_t count, loff_t *ppos) | |
2216 | { | |
2217 | struct address_space *mapping = file->f_mapping; | |
2218 | struct inode *inode = mapping->host; | |
2219 | ssize_t ret; | |
2220 | struct iovec local_iov = { .iov_base = (void __user *)buf, | |
2221 | .iov_len = count }; | |
2222 | ||
1b1dcc1b | 2223 | mutex_lock(&inode->i_mutex); |
1da177e4 | 2224 | ret = __generic_file_write_nolock(file, &local_iov, 1, ppos); |
1b1dcc1b | 2225 | mutex_unlock(&inode->i_mutex); |
1da177e4 LT |
2226 | |
2227 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
2228 | ssize_t err; | |
2229 | ||
2230 | err = sync_page_range(inode, mapping, *ppos - ret, ret); | |
2231 | if (err < 0) | |
2232 | ret = err; | |
2233 | } | |
2234 | return ret; | |
2235 | } | |
2236 | EXPORT_SYMBOL(generic_file_write); | |
2237 | ||
2238 | ssize_t generic_file_readv(struct file *filp, const struct iovec *iov, | |
2239 | unsigned long nr_segs, loff_t *ppos) | |
2240 | { | |
2241 | struct kiocb kiocb; | |
2242 | ssize_t ret; | |
2243 | ||
2244 | init_sync_kiocb(&kiocb, filp); | |
2245 | ret = __generic_file_aio_read(&kiocb, iov, nr_segs, ppos); | |
2246 | if (-EIOCBQUEUED == ret) | |
2247 | ret = wait_on_sync_kiocb(&kiocb); | |
2248 | return ret; | |
2249 | } | |
2250 | EXPORT_SYMBOL(generic_file_readv); | |
2251 | ||
2252 | ssize_t generic_file_writev(struct file *file, const struct iovec *iov, | |
2253 | unsigned long nr_segs, loff_t *ppos) | |
2254 | { | |
2255 | struct address_space *mapping = file->f_mapping; | |
2256 | struct inode *inode = mapping->host; | |
2257 | ssize_t ret; | |
2258 | ||
1b1dcc1b | 2259 | mutex_lock(&inode->i_mutex); |
1da177e4 | 2260 | ret = __generic_file_write_nolock(file, iov, nr_segs, ppos); |
1b1dcc1b | 2261 | mutex_unlock(&inode->i_mutex); |
1da177e4 LT |
2262 | |
2263 | if (ret > 0 && ((file->f_flags & O_SYNC) || IS_SYNC(inode))) { | |
2264 | int err; | |
2265 | ||
2266 | err = sync_page_range(inode, mapping, *ppos - ret, ret); | |
2267 | if (err < 0) | |
2268 | ret = err; | |
2269 | } | |
2270 | return ret; | |
2271 | } | |
2272 | EXPORT_SYMBOL(generic_file_writev); | |
2273 | ||
2274 | /* | |
1b1dcc1b | 2275 | * Called under i_mutex for writes to S_ISREG files. Returns -EIO if something |
1da177e4 LT |
2276 | * went wrong during pagecache shootdown. |
2277 | */ | |
5ce7852c | 2278 | static ssize_t |
1da177e4 LT |
2279 | generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, |
2280 | loff_t offset, unsigned long nr_segs) | |
2281 | { | |
2282 | struct file *file = iocb->ki_filp; | |
2283 | struct address_space *mapping = file->f_mapping; | |
2284 | ssize_t retval; | |
2285 | size_t write_len = 0; | |
2286 | ||
2287 | /* | |
2288 | * If it's a write, unmap all mappings of the file up-front. This | |
2289 | * will cause any pte dirty bits to be propagated into the pageframes | |
2290 | * for the subsequent filemap_write_and_wait(). | |
2291 | */ | |
2292 | if (rw == WRITE) { | |
2293 | write_len = iov_length(iov, nr_segs); | |
2294 | if (mapping_mapped(mapping)) | |
2295 | unmap_mapping_range(mapping, offset, write_len, 0); | |
2296 | } | |
2297 | ||
2298 | retval = filemap_write_and_wait(mapping); | |
2299 | if (retval == 0) { | |
2300 | retval = mapping->a_ops->direct_IO(rw, iocb, iov, | |
2301 | offset, nr_segs); | |
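		/*
		 * After a direct write, any page cache pages still covering
		 * the written range are stale; invalidate them so that later
		 * buffered reads do not see pre-DIO data.
		 */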
2302 | if (rw == WRITE && mapping->nrpages) { | |
2303 | pgoff_t end = (offset + write_len - 1) | |
2304 | >> PAGE_CACHE_SHIFT; | |
2305 | int err = invalidate_inode_pages2_range(mapping, | |
2306 | offset >> PAGE_CACHE_SHIFT, end); | |
2307 | if (err) | |
2308 | retval = err; | |
2309 | } | |
2310 | } | |
2311 | return retval; | |
2312 | } |