#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= __GFP_BITS_SHIFT + 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
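
/*
 * Usage sketch (illustration only, not part of this header; the helper
 * name is hypothetical): an async writeback completion handler records
 * any I/O error on the mapping, so that a later fsync() can report it:
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		mapping_set_error(page->mapping, err);	// no-op when err == 0
 *		end_page_writeback(page);
 *	}
 */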

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
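
/*
 * Usage sketch (illustration only): filesystems typically pin the
 * allocation policy for an inode's pagecache once, at inode setup time
 * and before any pages are inserted:
 *
 *	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER_MOVABLE);
 *
 * Individual allocation sites then narrow it per call:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);
 */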

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to look up the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
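
/*
 * Sketch of the lookup-side pattern described above (heavily simplified
 * from what find_get_page() actually does; slot dereferencing and
 * exceptional-entry handling are omitted):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;	// step 2 failed: page was freed
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);	// step 3 failed: no longer at offset
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */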

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
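
/*
 * Usage sketch (illustration only): grab-or-create a page, fill it, and
 * release it. The page comes back locked with a reference held:
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...		// fill the page
 *	unlock_page(page);
 *	put_page(page);
 */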

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
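
/*
 * Note that read_cache_page()/read_mapping_page() return an ERR_PTR()
 * on read failure, not NULL, so callers check with IS_ERR(). A minimal
 * sketch:
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	...		// page contents are uptodate here
 *	put_page(page);
 */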

/*
 * Get the offset in PAGE_SIZE units.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	pgoff_t pgoff;

	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
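
/*
 * Worked example (hypothetical numbers): for a VMA with vm_start =
 * 0x7f0000400000 and vm_pgoff = 8 (the mapping starts at file offset
 * 8 << PAGE_SHIFT), the address vm_start + 3 * PAGE_SIZE lies on file
 * page 3 + 8 = 11, which is what linear_page_index() returns.
 */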

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
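
/*
 * Usage sketch (illustration only): callers of lock_page_killable()
 * must be prepared to bail out, dropping their page reference, when a
 * fatal signal interrupts the wait:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		put_page(page);
 *		return error;
 *	}
 */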

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}
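
/*
 * Usage sketch (illustration only): truncate-style paths take the page
 * lock first and then drain writeback before touching page contents:
 *
 *	lock_page(page);
 *	wait_on_page_writeback(page);
 *	...		// page is locked and not under writeback
 *	unlock_page(page);
 */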

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault one or two userspace pages into pagetables.
 * Return -EINVAL if more than two pages would be needed.
 * Return non-zero on a fault.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int span, ret;

	if (unlikely(size == 0))
		return 0;

	span = offset_in_page(uaddr) + size;
	if (span > 2 * PAGE_SIZE)
		return -EINVAL;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0 && span > PAGE_SIZE)
		ret = __put_user(0, uaddr + size - 1);
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}
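
/*
 * Usage sketch (illustration only): generic write paths prefault the
 * user buffer before copying under the page lock, so the copy itself
 * cannot fault while the lock is held:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	...		// copy from buf with pagefaults disabled,
 *			// retrying on short copies
 */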

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
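
/*
 * Usage sketch (illustration only, simplified from the real read paths):
 * populating a pagecache miss allocates a fresh page, inserts it, and
 * starts a read; ->readpage unlocks the page when the I/O completes:
 *
 *	page = __page_cache_alloc(mapping_gfp_mask(mapping));
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *	if (err) {
 *		put_page(page);	// -EEXIST: someone else added it first
 *		return err;
 *	}
 *	err = mapping->a_ops->readpage(file, page);
 */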

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */