#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called. We endeavor
 * to report errors on any file that was open at the time of the error. Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	filemap_set_wb_err(mapping, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}
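
/*
 * Illustrative sketch (not part of this header): a filesystem's writepage
 * path would typically record failures roughly like this, assuming a
 * hypothetical my_fs_do_io() helper:
 *
 *	static int my_fs_writepage(struct page *page,
 *				   struct writeback_control *wbc)
 *	{
 *		int err = my_fs_do_io(page);
 *
 *		if (unlikely(err))
 *			mapping_set_error(page->mapping, err);
 *		return err;
 *	}
 */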

static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline int mapping_unevictable(struct address_space *mapping)
{
	if (mapping)
		return test_bit(AS_UNEVICTABLE, &mapping->flags);
	return !!mapping;
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}
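
/*
 * Illustrative use (sketch, mirroring common callers): a path that must not
 * recurse into the filesystem can mask off __GFP_FS while still honouring
 * the mapping's own allocation restrictions:
 *
 *	page = __page_cache_alloc(mapping_gfp_constraint(mapping, ~__GFP_FS));
 */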

/*
 * This is non-atomic. Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr, bool cold);

/*
 * Speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with tree_lock held for write):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using tree_lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int page_cache_get_speculative(struct page *page)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_inc(page);

#else
	if (unlikely(!get_page_unless_zero(page))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}
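
/*
 * Illustrative lookup-side sketch of the protocol above (simplified; the
 * real lookup in mm/filemap.c operates on radix-tree slots and also handles
 * exceptional entries):
 *
 *	rcu_read_lock();
 * repeat:
 *	page = radix_tree_lookup(&mapping->page_tree, offset);	// step 1
 *	if (page) {
 *		if (!page_cache_get_speculative(page))		// step 2
 *			goto repeat;	// page was being freed: retry
 *		// step 3: recheck the page is still at this slot
 *		if (page != radix_tree_lookup(&mapping->page_tree, offset)) {
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */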

/*
 * Same as above, but add instead of inc (could just be merged).
 */
static inline int page_cache_add_speculative(struct page *page, int count)
{
	VM_BUG_ON(in_interrupt());

#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0)))
		return 0;
#endif
	VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page);

	return 1;
}

#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline struct page *page_cache_alloc_cold(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) |
				__GFP_COLD | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_hole(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
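
/*
 * Summary of the fgp_flags above (paraphrased from the pagecache_get_page()
 * documentation in mm/filemap.c):
 *	FGP_ACCESSED	- mark the page accessed
 *	FGP_LOCK	- return the page locked
 *	FGP_CREAT	- if no page is present, allocate one and insert it
 *	FGP_WRITE	- the page will be written to
 *	FGP_NOFS	- clear __GFP_FS from the allocation mask
 *	FGP_NOWAIT	- don't block on the page lock
 */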

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Otherwise, %NULL is returned.
 *
 * find_lock_page() may sleep.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
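
/*
 * Illustrative use (sketch): on success the caller owns a reference (and,
 * for find_lock_page(), the page lock) and must drop both:
 *
 *	page = find_lock_page(mapping, index);
 *	if (page) {
 *		// ... inspect or modify the locked page ...
 *		unlock_page(page);
 *		put_page(page);
 *	}
 */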

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @offset: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t offset, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, offset,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset);
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
			  unsigned int nr_entries, struct page **entries,
			  pgoff_t *indices);
unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
			unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
			int tag, unsigned int nr_pages, struct page **pages);
unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
			int tag, unsigned int nr_entries,
			struct page **entries, pgoff_t *indices);

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
							pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
			pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
			pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	filler_t *filler = (filler_t *)mapping->a_ops->readpage;
	return read_cache_page(mapping, index, filler, data);
}
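
/*
 * Illustrative use (sketch): read_mapping_page() returns an uptodate page
 * with an elevated refcount, or an ERR_PTR() on failure:
 *
 *	page = read_mapping_page(mapping, index, NULL);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	// ... use the page contents ...
 *	put_page(page);
 */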

/*
 * Get the index of the page within the radix-tree.
 * (TODO: remove once hugetlb pages have ->index in units of PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * the head page.
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the page's offset, in units of PAGE_SIZE.
 * (TODO: hugepage should have ->index in units of PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;

	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}
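
/*
 * Worked example (illustrative, assuming 4K pages, i.e. PAGE_SHIFT == 12):
 * for a VMA with vm_start == 0x40000000 and vm_pgoff == 16, address
 * 0x40003000 yields pgoff = ((0x40003000 - 0x40000000) >> 12) + 16 = 19.
 */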

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}
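
/*
 * Illustrative use (sketch): callers typically propagate the error so that
 * a fatal signal aborts the operation:
 *
 *	int err = lock_page_killable(page);
 *
 *	if (err)
 *		return err;	// -EINTR: task is being killed
 *	// ... page is locked here ...
 *	unlock_page(page);
 */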

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_sem implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count", so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

/*
 * Wait for a page to complete writeback.
 */
static inline void wait_on_page_writeback(struct page *page)
{
	if (PageWriteback(page))
		wait_on_page_bit(page, PG_writeback);
}

extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue.
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}
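
/*
 * Illustrative use (sketch): write paths commonly pre-fault the source
 * buffer before taking a page lock, so that the later copy cannot fault
 * while the lock is held:
 *
 *	if (fault_in_pages_readable(buf, count))
 *		return -EFAULT;
 *	// ... lock the pagecache page and copy from buf ...
 */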

int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}
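
/*
 * Illustrative use (sketch): a typical allocate-and-insert pattern, as in
 * readahead-style code (error handling elided):
 *
 *	page = __page_cache_alloc(gfp);
 *	if (!page)
 *		return -ENOMEM;
 *	if (add_to_page_cache_lru(page, mapping, index, gfp)) {
 *		put_page(page);	// lost a race: page is already cached
 *		return 0;
 *	}
 *	// page is locked and in the cache: start I/O, unlock when uptodate
 */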

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

#endif /* _LINUX_PAGEMAP_H */