#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_BALLOON_MAP	= __GFP_BITS_SHIFT + 4,	/* balloon page special map */
	AS_EXITING	= __GFP_BITS_SHIFT + 5,	/* final truncate in progress */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
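
/*
 * Illustrative sketch, not part of the original header: writeback paths
 * record failures with mapping_set_error(), and sync paths later harvest
 * them, roughly as filemap_check_errors() in mm/filemap.c does:
 *
 *	int ret = 0;
 *
 *	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *		ret = -ENOSPC;
 *	if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *		ret = -EIO;
 *	return ret;
 */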

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_balloon(struct address_space *mapping)
{
	set_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline void mapping_clear_balloon(struct address_space *mapping)
{
	clear_bit(AS_BALLOON_MAP, &mapping->flags);
}

static inline int mapping_balloon(struct address_space *mapping)
{
	return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags);
}
72 | ||
91b0abe3 JW |
73 | static inline void mapping_set_exiting(struct address_space *mapping) |
74 | { | |
75 | set_bit(AS_EXITING, &mapping->flags); | |
76 | } | |
77 | ||
78 | static inline int mapping_exiting(struct address_space *mapping) | |
79 | { | |
80 | return test_bit(AS_EXITING, &mapping->flags); | |
81 | } | |

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}
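
/*
 * Illustrative sketch, an assumption rather than part of the original
 * header: a filesystem that must not recurse into itself from reclaim
 * typically masks out __GFP_FS once at inode-setup time, e.g.:
 *
 *	mapping_set_gfp_mask(inode->i_mapping,
 *			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
 */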

/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
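
/*
 * Illustrative sketch, not part of the original header: the lookup side
 * of the protocol above, written out schematically (the real
 * find_get_page() path works on radix tree slots and rechecks the slot
 * rather than page->mapping):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	   step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		   step 2
 *			goto repeat;
 *		if (page->mapping != mapping) {			   step 3
 *			page_cache_release(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */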

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON_PAGE(page_count(page) != 0, page);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}
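
/*
 * Illustrative sketch, not part of the original header: the remove side
 * described above, roughly as __remove_mapping() in mm/vmscan.c uses it
 * with mapping->tree_lock held:
 *
 *	if (!page_freeze_refs(page, 2))		   step A, 2 == expected refs
 *		goto cannot_free;
 *	__delete_from_page_cache(page, NULL);	   step B
 *	spin_unlock_irq(&mapping->tree_lock);
 *						   step C: caller frees page
 */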

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline struct page *page_cache_alloc_readahead(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN);
}
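
/*
 * Illustrative note, not part of the original header: __GFP_NORETRY |
 * __GFP_NOWARN make readahead allocations fail quickly and silently
 * under memory pressure instead of pushing toward the OOM killer, e.g.:
 *
 *	page = page_cache_alloc_readahead(mapping);
 *	if (!page)
 *		break;	   readahead is best-effort, so just stop early
 */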

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
}
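
/*
 * Illustrative sketch, an assumption rather than part of the original
 * header: the FGP_* flags combine freely, so a caller that wants a
 * locked, referenced, recently-used page created on demand could do:
 *
 *	page = pagecache_get_page(mapping, index,
 *			FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
 *			mapping_gfp_mask(mapping), GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 */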

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping),
			GFP_NOFS);
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
				pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
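
/*
 * Illustrative sketch, not part of the original header: callers of
 * read_mapping_page() get either an uptodate page or an ERR_PTR():
 *
 *	struct page *page = read_mapping_page(mapping, n, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	use the (uptodate) page contents, then drop the reference:
 *	page_cache_release(page);
 */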

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);
	else
		return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_file_index(page)) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}
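
/*
 * Illustrative example, not part of the original header: with 4K pages
 * (PAGE_SHIFT == 12) and PAGE_CACHE_SHIFT == PAGE_SHIFT, a VMA starting
 * at 0x10000 with vm_pgoff == 3 maps address 0x13000 to
 *
 *	pgoff = ((0x13000 - 0x10000) >> 12) + 3 = 6
 *
 * i.e. the seventh page-cache page of the backing object.
 */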

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
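
/*
 * Illustrative sketch, an assumption rather than part of the original
 * header: read paths typically take the page lock killably so that a
 * fatally signalled task does not hang on a stuck page:
 *
 *	error = lock_page_killable(page);
 *	if (unlikely(error)) {
 *		page_cache_release(page);
 *		return error;		   -EINTR if the task was killed
 *	}
 */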

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (PageLocked(page))
		return wait_on_page_bit_killable(page, PG_locked);
	return 0;
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, int rw, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables. Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient. That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}
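
/*
 * Illustrative sketch, not part of the original header: callers commonly
 * pre-fault the user buffer outside any page lock, then copy with page
 * faults disabled so the fault handler cannot deadlock on that lock:
 *
 *	if (fault_in_pages_writeable(buf, count))
 *		return -EFAULT;
 *	lock_page(page);
 *	kaddr = kmap_atomic(page);	   also disables page faults
 *	left = __copy_to_user_inatomic(buf, kaddr, count);
 *	kunmap_atomic(kaddr);
 *	unlock_page(page);
 */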

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
	int ret = 0;
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	while (uaddr <= end) {
		ret = __put_user(0, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		ret = __put_user(0, end);

	return ret;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
					       int size)
{
	volatile char c;
	int ret = 0;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return ret;

	while (uaddr <= end) {
		ret = __get_user(c, uaddr);
		if (ret != 0)
			return ret;
		uaddr += PAGE_SIZE;
	}

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		ret = __get_user(c, end);
		(void)c;
	}

	return ret;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}
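
/*
 * Illustrative sketch, an assumption rather than part of the original
 * header: a readahead-style caller allocates a fresh page, inserts it,
 * and drops its reference on failure:
 *
 *	page = page_cache_alloc_cold(mapping);
 *	if (!page)
 *		return -ENOMEM;
 *	ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
 *	if (ret)
 *		page_cache_release(page);
 */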

#endif /* _LINUX_PAGEMAP_H */