#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>	/* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.  The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
	AS_EIO		= __GFP_BITS_SHIFT + 0,	/* IO error on async write */
	AS_ENOSPC	= __GFP_BITS_SHIFT + 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= __GFP_BITS_SHIFT + 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= __GFP_BITS_SHIFT + 3,	/* e.g., ramdisk, SHM_LOCK */
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (unlikely(error)) {
		if (error == -ENOSPC)
			set_bit(AS_ENOSPC, &mapping->flags);
		else
			set_bit(AS_EIO, &mapping->flags);
	}
}

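/*
 * Illustrative sketch (not part of this header): an asynchronous writeback
 * error recorded with mapping_set_error() is typically reported back to a
 * later fsync()-style caller by test-and-clearing the flag bits, mirroring
 * what filemap_fdatawait() does with AS_ENOSPC/AS_EIO.  The helper name
 * below is hypothetical.
 */
static inline int example_report_mapping_error(struct address_space *mapping)
{
	int ret = 0;

	/* If both bits happen to be set, the later check wins and -EIO is returned. */
	if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;
	return ret;
}
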
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (likely(mapping))
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
	return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
				(__force unsigned long)mask;
}

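/*
 * Illustrative sketch (not part of this header): a filesystem usually calls
 * mapping_set_gfp_mask() while setting up a new inode, before any pagecache
 * pages exist for it - for example to forbid page allocations on its behalf
 * from re-entering the filesystem.  The helper name is hypothetical.
 */
static inline void example_init_mapping_gfp(struct inode *inode)
{
	/* Drop __GFP_FS from the default mask to avoid fs re-entrancy. */
	mapping_set_gfp_mask(inode->i_mapping,
			     mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
}
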
/*
 * The page cache can be done in larger chunks than
 * one page, because it allows for more efficient
 * throughput (it can then be mapped into user
 * space in smaller chunks for the same flexibility).
 *
 * Or rather, it _will_ be done in larger chunks.
 */
#define PAGE_CACHE_SHIFT	PAGE_SHIFT
#define PAGE_CACHE_SIZE		PAGE_SIZE
#define PAGE_CACHE_MASK		PAGE_MASK
#define PAGE_CACHE_ALIGN(addr)	(((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)

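/*
 * Illustrative sketch (not part of this header): typical arithmetic with the
 * PAGE_CACHE_* constants.  Given a file size, the pagecache index of the last
 * page is obtained by shifting the byte offset right by PAGE_CACHE_SHIFT;
 * PAGE_CACHE_ALIGN() rounds a byte count up to a page boundary.  The helper
 * name is hypothetical and assumes i_size > 0.
 */
static inline pgoff_t example_last_page_index(loff_t i_size)
{
	/* Byte offset of the last byte, converted to a pagecache index. */
	return (pgoff_t)((i_size - 1) >> PAGE_CACHE_SHIFT);
}
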
#define page_cache_get(page)		get_page(page)
#define page_cache_release(page)	put_page(page)
void release_pages(struct page **pages, int nr, int cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_count == 0), then _count is untouched, and 0
 * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _count.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 * (a sketch of this lookup pattern follows page_cache_get_speculative() below)
 *
 * Remove-side that cares about stability of _count (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON(page_count(page) == 0);
	atomic_inc(&page->_count);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON(PageTail(page));

	return 1;
}

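/*
 * Illustrative sketch (not part of this header): the lookup-side pattern
 * described above, as a simplified find_get_page()-style helper.  The name
 * is hypothetical and it assumes <linux/radix-tree.h> and <linux/rcupdate.h>
 * are available; the real lookup also copes with radix-tree slot details
 * that are omitted here.
 */
static inline struct page *example_lockless_lookup(struct address_space *mapping,
						   pgoff_t index)
{
	struct page *page;

	rcu_read_lock();
repeat:
	page = radix_tree_lookup(&mapping->page_tree, index);	/* step 1 */
	if (page) {
		if (!page_cache_get_speculative(page))		/* step 2 */
			goto repeat;
		/* step 3: was the page removed or replaced meanwhile? */
		if (unlikely(page != radix_tree_lookup(&mapping->page_tree,
						       index))) {
			page_cache_release(page);
			goto repeat;
		}
	}
	rcu_read_unlock();
	return page;	/* NULL, or a page with an elevated refcount */
}
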
/*
 * Same as page_cache_get_speculative(), but adds "count" references instead
 * of a single one (the two could just be merged).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT
	VM_BUG_ON(!in_atomic());
# endif
	VM_BUG_ON(page_count(page) == 0);
	atomic_add(count, &page->_count);

#else
	if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
		return 0;
#endif
	VM_BUG_ON(PageCompound(page) && page != compound_head(page));

	return 1;
}

static inline int page_freeze_refs(struct page *page, int count)
{
	return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
}

static inline void page_unfreeze_refs(struct page *page, int count)
{
	VM_BUG_ON(page_count(page) != 0);
	VM_BUG_ON(count == 0);

	atomic_set(&page->_count, count);
}

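/*
 * Illustrative sketch (not part of this header): the remove-side pattern from
 * the lockless pagecache comment above, roughly what reclaim does for a clean
 * pagecache page (the real code lives in mm/vmscan.c and the page cache
 * removal helpers).  The helper name is hypothetical and the accounting is
 * heavily simplified; expected_count is typically 2 - one reference held by
 * the page cache and one by the caller.
 */
static inline int example_remove_side(struct address_space *mapping,
				      struct page *page, int expected_count)
{
	spin_lock_irq(&mapping->tree_lock);
	/* A: freeze the refcount to 0 iff nobody else holds a reference. */
	if (!page_freeze_refs(page, expected_count)) {
		spin_unlock_irq(&mapping->tree_lock);
		return 0;	/* a speculative reference won the race */
	}
	/* B: remove the page from the radix tree (simplified). */
	radix_tree_delete(&mapping->page_tree, page->index);
	page->mapping = NULL;
	mapping->nrpages--;
	spin_unlock_irq(&mapping->tree_lock);
	/* C: hand the caller back its reference; page_cache_release() frees it. */
	page_unfreeze_refs(page, 1);
	return 1;
}
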
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

typedef int filler_t(void *, struct page *);

extern struct page * find_get_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_lock_page(struct address_space *mapping,
				pgoff_t index);
extern struct page * find_or_create_page(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns a locked page at the given index in the given mapping, creating it
 * if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

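/*
 * Illustrative sketch (not part of this header): a minimal "write one page of
 * zeroes into the page cache" helper built on grab_cache_page().  The helper
 * name is hypothetical; it assumes unlock_page(), declared further down in
 * this file, is visible.  Real write paths go through ->write_begin/->write_end
 * and handle partial pages, but the lock/dirty/unlock/release sequence is the
 * same.
 */
static inline int example_zero_cache_page(struct address_space *mapping,
					  pgoff_t index)
{
	struct page *page;

	page = grab_cache_page(mapping, index);		/* returned locked */
	if (!page)
		return -ENOMEM;

	zero_user(page, 0, PAGE_CACHE_SIZE);		/* from <linux/highmem.h> */
	SetPageUptodate(page);
	set_page_dirty(page);

	unlock_page(page);
	page_cache_release(page);
	return 0;
}
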
extern struct page * grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index);
extern struct page * read_cache_page_async(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler,
				void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page_async(
				struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page_async(mapping, index, filler, data);
}

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

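/*
 * Illustrative sketch (not part of this header): reading one page of a file
 * through the page cache with read_mapping_page() and copying it into a
 * kernel buffer.  The helper name is hypothetical; buf is assumed to hold at
 * least PAGE_CACHE_SIZE bytes, and NULL is passed as the filler data as many
 * callers do.
 */
static inline int example_read_one_page(struct address_space *mapping,
					pgoff_t index, void *buf)
{
	struct page *page;
	void *kaddr;

	page = read_mapping_page(mapping, index, NULL);
	if (IS_ERR(page))
		return PTR_ERR(page);	/* e.g. -EIO from the filler */

	/* The page comes back uptodate, unlocked, with a reference held. */
	kaddr = kmap(page);
	memcpy(buf, kaddr, PAGE_CACHE_SIZE);
	kunmap(page);

	page_cache_release(page);	/* drop the lookup reference */
	return 0;
}
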
/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
}

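/*
 * Illustrative sketch (not part of this header): how a fault path turns a
 * faulting user address into the pagecache page backing it.  The helper name
 * is hypothetical; it assumes the vma is file-backed (vma->vm_file != NULL).
 */
static inline struct page *example_page_for_address(struct vm_area_struct *vma,
						    unsigned long address)
{
	pgoff_t pgoff = linear_page_index(vma, address);

	/* Returns the page with an elevated refcount, or NULL if not cached. */
	return find_get_page(vma->vm_file->f_mapping, pgoff);
}
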
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern void __lock_page_nosync(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline void __set_page_locked(struct page *page)
{
	__set_bit(PG_locked, &page->flags);
}

static inline void __clear_page_locked(struct page *page)
{
	__clear_bit(PG_locked, &page->flags);
}

static inline int trylock_page(struct page *page)
{
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

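/*
 * Illustrative sketch (not part of this header): the usual pattern around
 * lock_page_killable() - propagate -EINTR if a fatal signal arrived, and
 * re-check page->mapping once the lock is held, because the page may have
 * been truncated while we slept.  The helper name is hypothetical.
 */
static inline int example_lock_and_check(struct page *page,
					 struct address_space *mapping)
{
	int err = lock_page_killable(page);

	if (err)
		return err;		/* -EINTR: caller backs out */

	if (page->mapping != mapping) {
		/* Truncated or migrated away while we waited for the lock. */
		unlock_page(page);
		return -EAGAIN;
	}
	return 0;			/* locked and still in this mapping */
}
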
/*
 * lock_page_nosync should only be used if we can't pin the page's inode.
 * Doesn't play quite so well with block device plugging.
 */
static inline void lock_page_nosync(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page_nosync(page);
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback.
 * Never use this directly!
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(page, PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

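/*
 * Illustrative sketch (not part of this header): waiting for in-flight
 * writeback before modifying a page, as data-integrity paths do.  The helper
 * name is hypothetical; the caller is assumed to hold a reference on the page.
 */
static inline void example_wait_before_modify(struct page *page)
{
	lock_page(page);		/* serialise against new writeback */
	wait_on_page_writeback(page);	/* let any outstanding I/O finish */
	/* ... modify page contents, then set_page_dirty(page) ... */
	unlock_page(page);
}
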
extern void end_page_writeback(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault a userspace page into pagetables.  Return non-zero on a fault.
 *
 * This assumes that two userspace pages are always sufficient.  That's
 * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	int ret;

	if (unlikely(size == 0))
		return 0;

	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	ret = __put_user(0, uaddr);
	if (ret == 0) {
		char __user *end = uaddr + size - 1;

		/*
		 * If the page was already mapped, this will get a cache miss
		 * for sure, so try to avoid doing it.
		 */
		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK))
			ret = __put_user(0, end);
	}
	return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	int ret;

	if (unlikely(size == 0))
		return 0;

	ret = __get_user(c, uaddr);
	if (ret == 0) {
		const char __user *end = uaddr + size - 1;

		if (((unsigned long)uaddr & PAGE_MASK) !=
				((unsigned long)end & PAGE_MASK)) {
			ret = __get_user(c, end);
			(void)c;
		}
	}
	return ret;
}

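/*
 * Illustrative sketch (not part of this header): why write paths call
 * fault_in_pages_readable() before locking a pagecache page.  Copying from a
 * not-yet-faulted user buffer while holding the page lock could deadlock if
 * the buffer is mmapped from the very page we hold locked, so the buffer is
 * pre-faulted first (the real code additionally copies with pagefaults
 * disabled).  The helper name is hypothetical and heavily simplified versus
 * generic_perform_write(); bytes is assumed to be at most PAGE_CACHE_SIZE.
 */
static inline long example_copy_from_user_to_page(struct page *page,
						  const char __user *buf,
						  unsigned long bytes)
{
	char *kaddr;
	unsigned long left;

	if (unlikely(fault_in_pages_readable(buf, bytes)))
		return -EFAULT;		/* user buffer is not accessible */

	lock_page(page);
	kaddr = kmap(page);
	left = __copy_from_user(kaddr, buf, bytes);	/* returns bytes not copied */
	kunmap(page);
	if (!left)
		set_page_dirty(page);
	unlock_page(page);

	return left ? -EFAULT : 0;
}
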
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void remove_from_page_cache(struct page *page);
extern void __remove_from_page_cache(struct page *page);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __set_page_locked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__set_page_locked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__clear_page_locked(page);
	return error;
}

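/*
 * Illustrative sketch (not part of this header): the common "allocate a page,
 * insert it, read it" sequence used by readahead-style code.  The helper name
 * is hypothetical; a real caller must cope with -EEXIST races (someone else
 * added the page first) and must not assume ->readpage completed
 * synchronously.
 */
static inline int example_add_and_read(struct file *file,
				       struct address_space *mapping,
				       pgoff_t index)
{
	struct page *page;
	int error;

	page = page_cache_alloc_cold(mapping);
	if (!page)
		return -ENOMEM;

	/* Inserts the locked page into the cache and puts it on the LRU. */
	error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
	if (error) {
		page_cache_release(page);
		return error;	/* e.g. -EEXIST if we lost the race */
	}

	error = mapping->a_ops->readpage(file, page);
	/* ->readpage unlocks the page when the read completes or fails. */
	page_cache_release(page);	/* drop our allocation reference */
	return error;
}
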
#endif /* _LINUX_PAGEMAP_H */