#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}
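
/*
 * Illustrative usage (a sketch, not taken from this header): an I/O
 * completion path typically records an async writeback error on the
 * mapping so that a later fsync()/msync() can report it. The function
 * example_end_write() below is hypothetical; real callers include
 * filesystem writepage completion handlers:
 *
 *	static void example_end_write(struct page *page, int error)
 *	{
 *		if (error)
 *			mapping_set_error(page->mapping, error);
 *		end_page_writeback(page);
 *	}
 */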

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
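
/*
 * Illustrative usage (a sketch): allocation sites honour the mapping's
 * mask by intersecting it with their own context's constraints, for
 * example dropping __GFP_FS while holding a filesystem lock:
 *
 *	gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL & ~__GFP_FS);
 *
 *	page = __page_cache_alloc(gfp);
 */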

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
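
/*
 * Illustrative sketch (an assumption; the real lookup code lives in
 * mm/filemap.c and uses radix-tree slot dereferencing): the lookup-side
 * pattern described above. After the speculative get, the page must be
 * re-checked against the radix tree, because it may have been freed and
 * reused while we held no lock:
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);
 *	if (page) {
 *		if (!page_cache_get_speculative(page))
 *			goto repeat;
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */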

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				  __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}
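
/*
 * Illustrative usage (a sketch): a typical caller that needs a locked
 * page at a given index, existing or freshly allocated, e.g. in a
 * filesystem write path. The error handling shown is the conventional
 * pairing of unlock_page() and put_page():
 *
 *	struct page *page;
 *
 *	page = find_or_create_page(mapping, index, GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	... initialize or modify the page contents ...
 *	unlock_page(page);
 *	put_page(page);
 */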

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	pgoff_t pgoff;

	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
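
/*
 * Illustrative usage (a sketch): long waits for a page lock should
 * prefer the killable variant so that a fatal signal does not leave the
 * task stuck in uninterruptible sleep. The -EINTR return is propagated
 * after dropping the page reference:
 *
 *	error = lock_page_killable(page);
 *	if (error) {
 *		put_page(page);
 *		return error;
 *	}
 */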

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}
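
/*
 * Illustrative sketch (an assumption, modeled on the fault path in
 * mm/filemap.c): a page-fault handler that may drop mmap_sem uses this
 * instead of lock_page(), and tells its caller to retry the fault when
 * the lock could not be taken without blocking:
 *
 *	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
 *		put_page(page);
 *		return VM_FAULT_RETRY;
 *	}
 */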

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
					     int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
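
/*
 * Illustrative usage (a sketch, mirroring the generic write path): fault
 * the source buffer in before taking page locks, so that copying from
 * userspace later cannot fault with a page lock held and deadlock
 * against ourselves:
 *
 *	if (unlikely(fault_in_pages_readable(buf, bytes)))
 *		return -EFAULT;
 *	...
 *	lock_page(page);
 *	... copy from buf into the page with an atomic usercopy ...
 */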

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */