#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};
/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        filemap_set_wb_err(mapping, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}
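
/*
 * Usage sketch (illustrative only, not taken from this header; the
 * example_writepage() and example_do_io() names are hypothetical): a
 * filesystem that hits an I/O error while writing a page back records it
 * in the mapping so that a later fsync(2) can report the failure:
 *
 *      static int example_writepage(struct page *page,
 *                                   struct writeback_control *wbc)
 *      {
 *              int err = example_do_io(page);
 *
 *              if (err)
 *                      mapping_set_error(page->mapping, err);
 *              unlock_page(page);
 *              return err;
 *      }
 */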

static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
        if (mapping)
                return test_bit(AS_UNEVICTABLE, &mapping->flags);
        return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on the order in which locks are granted.
 *
 * Lookups racing against pagecache insertion aren't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_inc(page);

#else
        if (unlikely(!get_page_unless_zero(page))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}
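
/*
 * Illustrative sketch of the lookup-side pattern described above, roughly
 * what a lockless lookup such as find_get_page() does internally (simplified;
 * the real code works on radix tree slots rather than repeating full
 * lookups):
 *
 *      rcu_read_lock();
 * repeat:
 *      page = radix_tree_lookup(&mapping->page_tree, offset);  (step 1)
 *      if (page) {
 *              if (!page_cache_get_speculative(page))           (step 2)
 *                      goto repeat;
 *              if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *                      put_page(page);                          (step 3 failed)
 *                      goto repeat;
 *              }
 *      }
 *      rcu_read_unlock();
 */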

/*
 * Same as above, but add instead of inc (could just be merged)
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
        VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0)))
                return 0;
#endif
        VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

        return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) |
                                __GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                         pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                               pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                          pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset. If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list. The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                               pgoff_t offset, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, offset,
                                  FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                  gfp_mask);
}
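
/*
 * Usage sketch (illustrative): get the page at @offset, allocating and
 * inserting it when it is not cached yet, then drop the lock and the
 * reference when done with it:
 *
 *      struct page *page;
 *
 *      page = find_or_create_page(mapping, offset, mapping_gfp_mask(mapping));
 *      if (!page)
 *              return -ENOMEM;
 *      ... use the locked page ...
 *      unlock_page(page);
 *      put_page(page);
 */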

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed. This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
                        int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
                        int tag, unsigned int nr_entries,
                        struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                           pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                             pgoff_t index, void *data)
{
        filler_t *filler = (filler_t *)mapping->a_ops->readpage;
        return read_cache_page(mapping, index, filler, data);
}

/*
 * Get the index of the page within the radix tree.
 * (TODO: remove once hugetlb pages have ->index in units of PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}
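
/*
 * Worked example (assuming 4KiB pages, i.e. PAGE_SHIFT == 12): for a VMA
 * with vm_start == 0x700000000000 and vm_pgoff == 16, a fault at
 * vm_start + 3 * 4096 yields pgoff = ((3 * 4096) >> 12) + 16 = 19, the
 * index of the backing page within the mapped object.
 */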

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals. It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback
 */
static inline void wait_on_page_writeback(struct page *page)
{
        if (PageWriteback(page))
                wait_on_page_bit(page, PG_writeback);
}
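
/*
 * Typical pattern (illustrative): a caller that wants to modify a page that
 * may currently be under writeback locks it, waits for any in-flight
 * writeback to finish, and only then dirties it again:
 *
 *      lock_page(page);
 *      wait_on_page_writeback(page);
 *      ... modify page contents ...
 *      set_page_dirty(page);
 *      unlock_page(page);
 */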

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}
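
/*
 * Usage sketch (illustrative of the common buffered-write pattern; @buf and
 * @bytes are hypothetical): fault the user buffer in before taking the
 * pagecache page lock, so that a copy done later with page faults disabled
 * is unlikely to fail and cannot deadlock against our own locked page:
 *
 *      if (unlikely(fault_in_pages_readable(buf, bytes)))
 *              return -EFAULT;
 *      ... lock the pagecache page, then copy from @buf with
 *          pagefault_disable() in effect ...
 */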

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                             pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                          pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}
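
/*
 * Usage sketch (illustrative): insert a freshly allocated page at @offset,
 * freeing it again if the insertion fails (e.g. because another task added
 * a page there first):
 *
 *      struct page *page = __page_cache_alloc(gfp_mask);
 *      int err;
 *
 *      if (!page)
 *              return -ENOMEM;
 *      err = add_to_page_cache_lru(page, mapping, offset, gfp_mask);
 *      if (err) {
 *              put_page(page);
 *              return err;
 *      }
 *      (on success the new page is locked and in the pagecache)
 */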

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
               PAGE_SHIFT;
}
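
/*
 * Worked example (assuming 4KiB pages): for i_size == 10000 bytes,
 * dir_pages() returns (10000 + 4096 - 1) >> 12 = 3, i.e. the directory
 * spans three pagecache pages.
 */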

#endif /* _LINUX_PAGEMAP_H */