/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

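/*
 * Illustrative sketch (not part of this header): a filesystem's writeback
 * completion path might record a failure so that a later fsync(2) can
 * report it.  The helper name below is hypothetical.
 *
 *	static void example_end_writeback(struct page *page, int err)
 *	{
 *		if (err)
 *			mapping_set_error(page->mapping, err);
 *		end_page_writeback(page);
 *	}
 */
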
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned.  Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable.  page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache.  That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not.  Likewise, the old find_get_page could
 * run either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}

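/*
 * Illustrative sketch of the lookup-side protocol described above
 * (simplified; the real lookup lives in mm/filemap.c and uses the
 * XArray iteration primitives):
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 *repeat:
 *	page = xa_load(&mapping->i_pages, offset);		// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;
 *		if (page != xa_load(&mapping->i_pages, offset)) {
 *			put_page(page);				// step 3 failed
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */
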
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

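/*
 * Illustrative sketch: a typical lookup pairs find_get_page() with
 * put_page() to drop the reference once the caller is done with the page:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */
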
/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

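/*
 * Illustrative sketch: walking a mapping in batches with find_get_pages().
 * Each returned page carries a reference that the caller must drop:
 *
 *	struct page *pages[16];
 *	pgoff_t index = 0;
 *	unsigned int i, nr;
 *
 *	while ((nr = find_get_pages(mapping, &index, 16, pages))) {
 *		for (i = 0; i < nr; i++) {
 *			... inspect pages[i] ...
 *			put_page(pages[i]);
 *		}
 *	}
 */
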
struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
								pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}

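/*
 * Illustrative sketch: read_cache_page() and friends return an uptodate
 * page or an ERR_PTR() value, so callers check with IS_ERR():
 *
 *	struct page *page = read_mapping_page(mapping, index, NULL);
 *
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page ...
 *	put_page(page);
 */
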
/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

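/*
 * Illustrative sketch: a fault handler can map a faulting address back to
 * its file offset via linear_page_index():
 *
 *	pgoff_t pgoff = linear_page_index(vma, vmf->address);
 *	loff_t pos = (loff_t)pgoff << PAGE_SHIFT;
 */
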
extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

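/*
 * Illustrative sketch: callers that may block on the page lock in a
 * user-visible path typically prefer the killable variant:
 *
 *	int err = lock_page_killable(page);
 *
 *	if (err) {
 *		put_page(page);
 *		return err;	// -EINTR: a fatal signal arrived
 *	}
 *	... operate on the locked page ...
 *	unlock_page(page);
 */
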
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

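/*
 * Illustrative sketch: waiting for a page that someone else holds locked,
 * while holding our own reference so the page cannot be freed meanwhile:
 *
 *	struct page *page = find_get_page(mapping, index);
 *
 *	if (page) {
 *		wait_on_page_locked(page);
 *		put_page(page);
 *	}
 */
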
extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in all pages of the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
	    ((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

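/*
 * Illustrative sketch: generic write paths fault the user buffer in first,
 * so that a later copy under the page lock is unlikely to take a page
 * fault while the lock is held:
 *
 *	if (fault_in_pages_readable(buf, count))
 *		return -EFAULT;
 *	... lock the page, then copy from buf with pagefaults disabled ...
 */
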
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

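/*
 * Illustrative sketch: a caller inserting a freshly allocated page drops
 * its reference if the insertion fails:
 *
 *	struct page *page = __page_cache_alloc(gfp);
 *	int err;
 *
 *	if (!page)
 *		return -ENOMEM;
 *	err = add_to_page_cache_lru(page, mapping, index, gfp);
 *	if (err)
 *		put_page(page);
 *	return err;
 */
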
static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */