/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

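/*
 * Illustrative sketch, not part of this header: how a filesystem's writeback
 * completion path might latch a failure so that a later fsync(2) reports it.
 * The function name is hypothetical; only mapping_set_error() above is the
 * real interface.
 */
static inline void example_record_writeback_error(struct address_space *mapping,
                                                  int err)
{
        /*
         * err is typically 0, -EIO or -ENOSPC from the I/O completion;
         * mapping_set_error() is a no-op for 0, so calling it
         * unconditionally is fine.
         */
        mapping_set_error(mapping, err);
}
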
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

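/*
 * Illustrative sketch, not part of this header: a filesystem that must not
 * recurse into filesystem reclaim (for example while holding one of its own
 * locks) typically strips __GFP_FS while still honouring the mapping's mask.
 * The helper name is hypothetical.
 */
static inline gfp_t example_nofs_gfp(struct address_space *mapping)
{
        return mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);
}
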
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

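/*
 * Condensed sketch, not part of this header, of the lookup-side pattern
 * (steps 1-3) documented above, loosely modelled on find_get_page() in
 * mm/filemap.c.  Exceptional (shadow) entries and compound-page handling are
 * omitted, and the ->page_tree field name assumes the radix-tree era of this
 * header; the function name is hypothetical.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
                                                   pgoff_t offset)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = radix_tree_lookup(&mapping->page_tree, offset);  /* step 1 */
        if (page) {
                if (!page_cache_get_speculative(page))           /* step 2 */
                        goto repeat;
                /* step 3: is the page still at this slot in the pagecache? */
                if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
                                                       offset))) {
                        put_page(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}
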
/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

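/*
 * Illustrative sketch, not part of this header: the common pattern of pinning
 * a locked page and populating it only on first use.  The function name and
 * the fill_page() callback are hypothetical, and unlock_page() is declared
 * further down in this header.
 */
static inline struct page *example_get_filled_page(struct address_space *mapping,
                                pgoff_t index,
                                void (*fill_page)(struct page *))
{
        struct page *page;

        page = find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
        if (!page)
                return NULL;
        if (!PageUptodate(page)) {
                /* Freshly created (or never read): initialize the contents. */
                fill_page(page);
                SetPageUptodate(page);
        }
        unlock_page(page);      /* keep the reference, drop the lock */
        return page;
}
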
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, int tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, int tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

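/*
 * Illustrative sketch, not part of this header: walking every cached page of
 * a file range in batches with find_get_pages_range(), which advances *start
 * past the pages it returned.  The caller owns one reference per returned
 * page and must drop it.  The function name is hypothetical.
 */
static inline void example_walk_range(struct address_space *mapping,
                                      pgoff_t start, pgoff_t end)
{
        struct page *pages[16];
        unsigned int i, nr;

        while ((nr = find_get_pages_range(mapping, &start, end,
                                          ARRAY_SIZE(pages), pages)) != 0) {
                for (i = 0; i < nr; i++) {
                        /* ... inspect pages[i] here ... */
                        put_page(pages[i]);
                }
        }
}
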
struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}

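/*
 * Illustrative sketch, not part of this header: reading one page of a file
 * through the mapping's ->readpage and mapping it for inspection, roughly as
 * simple filesystems do for directory blocks.  The function name is
 * hypothetical and error handling is minimal; the caller is expected to
 * kunmap() and put_page() via *pagep when done.
 */
static inline void *example_read_and_map(struct address_space *mapping,
                                         pgoff_t index, struct page **pagep)
{
        struct page *page = read_mapping_page(mapping, index, NULL);

        if (IS_ERR(page))
                return NULL;
        *pagep = page;
        return kmap(page);
}
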
/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * the head page.
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the page's offset in PAGE_SIZE units.
 * (TODO: hugetlb pages should have ->index in PAGE_SIZE units)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

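/*
 * Illustrative sketch, not part of this header: converting a faulting user
 * address in a file-backed VMA into the byte offset it corresponds to in the
 * backing file, which is essentially how fault handlers derive the pgoff they
 * operate on.  The function name is hypothetical.
 */
static inline loff_t example_fault_file_offset(struct vm_area_struct *vma,
                                               unsigned long address)
{
        return (loff_t)linear_page_index(vma, address) << PAGE_SHIFT;
}
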
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

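/*
 * Illustrative sketch, not part of this header: the usual re-check that
 * callers do after taking the page lock on a page found by a lockless lookup,
 * since the page may have been truncated or reclaimed in the meantime.  The
 * function name is hypothetical.
 */
static inline int example_lock_page_and_check(struct page *page,
                                              struct address_space *mapping)
{
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {
                /* Raced with truncation or reclaim; the caller should retry. */
                unlock_page(page);
                return 0;
        }
        return 1;               /* page is locked and still in @mapping */
}
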
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

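/*
 * Illustrative sketch, not part of this header: modifying page contents
 * without scribbling over an in-flight writeback, for backing devices that
 * require stable pages (wait_for_stable_page() is a no-op otherwise).  The
 * function name is hypothetical.
 */
static inline void example_modify_page(struct page *page)
{
        lock_page(page);
        wait_for_stable_page(page);
        /* ... update the page contents here ... */
        set_page_dirty(page);
        unlock_page(page);
}
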
/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}

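/*
 * Illustrative sketch, not part of this header: pre-faulting a user buffer
 * before copying from it with page faults disabled (e.g. while holding a page
 * lock), so that the later atomic copy is unlikely to come up short.  The
 * function name is hypothetical; the copy step itself is elided.
 */
static inline int example_prefault_user_buffer(const char __user *buf, int count)
{
        if (fault_in_pages_readable(buf, count))
                return -EFAULT;
        /* The pages backing @buf are now likely resident for the copy. */
        return 0;
}
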
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}

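/*
 * Illustrative sketch, not part of this header: allocating a fresh page and
 * inserting it into the page cache, roughly as readahead does.  On success
 * the page is returned locked and on the LRU, with the caller holding a
 * reference.  The function name is hypothetical and error handling is
 * reduced to the minimum.
 */
static inline struct page *example_add_new_page(struct address_space *mapping,
                                                pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index, gfp)) {
                /* Most likely someone else cached a page at @index already. */
                put_page(page);
                return NULL;
        }
        return page;
}
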
static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */