#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <asm/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
 * allocation mode flags.
 */
enum mapping_flags {
        AS_EIO          = __GFP_BITS_SHIFT + 0, /* IO error on async write */
        AS_ENOSPC       = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = __GFP_BITS_SHIFT + 4, /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = __GFP_BITS_SHIFT + 5,
};

static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (unlikely(error)) {
                if (error == -ENOSPC)
                        set_bit(AS_ENOSPC, &mapping->flags);
                else
                        set_bit(AS_EIO, &mapping->flags);
        }
}
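
/*
 * Illustrative sketch (editor's addition, simplified): the AS_EIO/AS_ENOSPC
 * bits recorded by mapping_set_error() on a writeback failure are typically
 * consumed later by an fsync-style path that tests and clears them, roughly:
 *
 *        if (test_and_clear_bit(AS_ENOSPC, &mapping->flags))
 *                ret = -ENOSPC;
 *        if (test_and_clear_bit(AS_EIO, &mapping->flags))
 *                ret = -EIO;
 */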

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
                   (__force unsigned long)mask;
}
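
/*
 * Illustrative sketch (editor's addition): a filesystem that must not recurse
 * into itself from memory reclaim can narrow the mapping's allocation mask
 * once, while the inode is being set up, along the lines of:
 *
 *        mapping_set_gfp_mask(inode->i_mapping,
 *                mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS));
 */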

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
        VM_BUG_ON(in_interrupt());

#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
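
/*
 * Illustrative sketch (editor's addition) of the lookup-side pattern described
 * above, written out as a lockless find-and-get loop. The radix-tree handling
 * is simplified; see find_get_entry() in mm/filemap.c for the real thing.
 *
 *        rcu_read_lock();
 * repeat:
 *        page = radix_tree_lookup(&mapping->page_tree, offset);   (step 1)
 *        if (page) {
 *                if (!page_cache_get_speculative(page))           (step 2)
 *                        goto repeat;
 *                if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *                        put_page(page);                   (step 3 failed)
 *                        goto repeat;
 *                }
 *        }
 *        rcu_read_unlock();
 */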

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
               __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED    0x00000001      /* mark the page accessed */
#define FGP_LOCK        0x00000002      /* return the page locked */
#define FGP_CREAT       0x00000004      /* allocate the page if not present */
#define FGP_WRITE       0x00000008      /* the page will be written to */
#define FGP_NOFS        0x00000010      /* clear __GFP_FS from the gfp mask */
#define FGP_NOWAIT      0x00000020      /* do not sleep for the page lock */

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                                int fgp_flags, gfp_t cache_gfp_mask);
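
/*
 * Illustrative sketch (editor's addition): grabbing a locked page, creating
 * it if need be, without allowing the allocation to recurse into the
 * filesystem, and releasing it afterwards:
 *
 *        page = pagecache_get_page(mapping, index,
 *                                  FGP_LOCK | FGP_CREAT | FGP_NOFS,
 *                                  mapping_gfp_mask(mapping));
 *        if (!page)
 *                return -ENOMEM;
 *        ...
 *        unlock_page(page);
 *        put_page(page);
 */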

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                                  pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                                  FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                                  mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
                        unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                            int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                              int tag, unsigned int nr_entries,
                              struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                                         pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
                                    pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                            struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}
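
/*
 * Illustrative sketch (editor's addition): reading one page of an inode's
 * data through its address_space and releasing it again. On failure,
 * read_cache_page() returns an ERR_PTR value, hence the IS_ERR() check:
 *
 *        struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *        if (IS_ERR(page))
 *                return PTR_ERR(page);
 *        ...access the (uptodate, unlocked) page contents...
 *        put_page(page);
 */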

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        pgoff_t pgoff;

        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
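
/*
 * Worked example (editor's addition, hypothetical numbers): for a VMA with
 * vm_start == 0x7f0000000000 and vm_pgoff == 16, a fault at address
 * 0x7f0000003000 gives
 *
 *        pgoff = ((0x7f0000003000 - 0x7f0000000000) >> PAGE_SHIFT) + 16
 *              = 3 + 16 = 19
 *
 * i.e. the 20th page of the mapped file (assuming 4 KiB pages, so
 * PAGE_SHIFT == 12).
 */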

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}
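
/*
 * Illustrative sketch (editor's addition): a read path that must drop its
 * page reference if the task receives a fatal signal while waiting for the
 * page lock:
 *
 *        error = lock_page_killable(page);
 *        if (unlikely(error)) {
 *                put_page(page);
 *                return error;
 *        }
 *        ...
 *        unlock_page(page);
 */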

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback,
 * and for filesystems which need to wait on PG_private.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);

extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
                                             int bit_nr, unsigned long timeout);

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern wait_queue_head_t *page_waitqueue(struct page *page);
static inline void wake_up_page(struct page *page, int bit)
{
        __wake_up_bit(page_waitqueue(page), &page->flags, bit);
}

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}
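
/*
 * Illustrative sketch (editor's addition): as the comment above says, the
 * caller must hold a reference across the wait so the page cannot be freed
 * underneath it:
 *
 *        get_page(page);
 *        wait_on_page_locked(page);
 *        put_page(page);
 */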

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_t *waiter);

/*
 * Fault one or two userspace pages into pagetables.
 * Return -EINVAL if more than two pages would be needed.
 * Return non-zero on a fault.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        int span, ret;

        if (unlikely(size == 0))
                return 0;

        span = offset_in_page(uaddr) + size;
        if (span > 2 * PAGE_SIZE)
                return -EINVAL;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        ret = __put_user(0, uaddr);
        if (ret == 0 && span > PAGE_SIZE)
                ret = __put_user(0, uaddr + size - 1);
        return ret;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        int ret;

        if (unlikely(size == 0))
                return 0;

        ret = __get_user(c, uaddr);
        if (ret == 0) {
                const char __user *end = uaddr + size - 1;

                if (((unsigned long)uaddr & PAGE_MASK) !=
                    ((unsigned long)end & PAGE_MASK)) {
                        ret = __get_user(c, end);
                        (void)c;
                }
        }
        return ret;
}
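
/*
 * Illustrative sketch (editor's addition, simplified): the classic write(2)
 * pattern that motivates these helpers. The user buffer is prefaulted
 * before the page lock is taken, because faulting it in while holding the
 * lock could recurse into the filesystem and deadlock:
 *
 *        if (fault_in_pages_readable(ubuf, bytes))
 *                return -EFAULT;
 *
 *        page = grab_cache_page_write_begin(mapping, index, flags);
 *        ...copy from ubuf into the page using the atomic (non-faulting)
 *           usercopy primitives, then unlock and release the page...
 */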

/*
 * Multipage variants of the above prefault helpers, useful if more than
 * PAGE_SIZE of data needs to be prefaulted. These are separate from the above
 * functions (which only handle up to PAGE_SIZE) to avoid clobbering the
 * filemap.c hotpaths.
 */
static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
            ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_multipages_readable(const char __user *uaddr,
                                               int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
            ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
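
/*
 * Illustrative sketch (editor's addition, simplified): the typical way a new
 * pagecache page is instantiated, e.g. on a read miss. The freshly allocated
 * page is inserted (and put on the LRU) before being filled:
 *
 *        page = __page_cache_alloc(mapping_gfp_constraint(mapping, GFP_KERNEL));
 *        if (!page)
 *                return -ENOMEM;
 *        error = add_to_page_cache_lru(page, mapping, index, GFP_KERNEL);
 *        if (error) {
 *                put_page(page);
 *                return error;
 *        }
 *        ...the page is now locked and in the cache; fill it, then
 *           unlock_page() and eventually put_page() it...
 */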

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
               PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */