]>
Commit | Line | Data |
---|---|---|
b2441318 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
1da177e4 LT |
2 | #ifndef _LINUX_PAGEMAP_H |
3 | #define _LINUX_PAGEMAP_H | |
4 | ||
5 | /* | |
6 | * Copyright 1995 Linus Torvalds | |
7 | */ | |
8 | #include <linux/mm.h> | |
9 | #include <linux/fs.h> | |
10 | #include <linux/list.h> | |
11 | #include <linux/highmem.h> | |
12 | #include <linux/compiler.h> | |
7c0f6ba6 | 13 | #include <linux/uaccess.h> |
1da177e4 | 14 | #include <linux/gfp.h> |
3e9f45bd | 15 | #include <linux/bitops.h> |
e286781d | 16 | #include <linux/hardirq.h> /* for in_interrupt() */ |
8edf344c | 17 | #include <linux/hugetlb_inline.h> |
1da177e4 | 18 | |
aa65c29c JK |
19 | struct pagevec; |
20 | ||
1da177e4 | 21 | /* |
9c5d760b | 22 | * Bits in mapping->flags. |
1da177e4 | 23 | */ |
9a896c9a | 24 | enum mapping_flags { |
9c5d760b MH |
25 | AS_EIO = 0, /* IO error on async write */ |
26 | AS_ENOSPC = 1, /* ENOSPC on async write */ | |
27 | AS_MM_ALL_LOCKS = 2, /* under mm_take_all_locks() */ | |
28 | AS_UNEVICTABLE = 3, /* e.g., ramdisk, SHM_LOCK */ | |
29 | AS_EXITING = 4, /* final truncate in progress */ | |
371a096e | 30 | /* writeback related tags are not used */ |
9c5d760b | 31 | AS_NO_WRITEBACK_TAGS = 5, |
9a896c9a | 32 | }; |
1da177e4 | 33 | |
8ed1e46a JL |
34 | /** |
35 | * mapping_set_error - record a writeback error in the address_space | |
767e5ee5 MWO |
36 | * @mapping: the mapping in which an error should be set |
37 | * @error: the error to set in the mapping | |
8ed1e46a JL |
38 | * |
39 | * When writeback fails in some way, we must record that error so that | |
40 | * userspace can be informed when fsync and the like are called. We endeavor | |
41 | * to report errors on any file that was open at the time of the error. Some | |
42 | * internal callers also need to know when writeback errors have occurred. | |
43 | * | |
44 | * When a writeback error occurs, most filesystems will want to call | |
45 | * mapping_set_error to record the error in the mapping so that it can be | |
46 | * reported when the application calls fsync(2). | |
47 | */ | |
3e9f45bd GC |
48 | static inline void mapping_set_error(struct address_space *mapping, int error) |
49 | { | |
8ed1e46a JL |
50 | if (likely(!error)) |
51 | return; | |
52 | ||
53 | /* Record in wb_err for checkers using errseq_t based tracking */ | |
735e4ae5 JL |
54 | __filemap_set_wb_err(mapping, error); |
55 | ||
56 | /* Record it in superblock */ | |
57 | errseq_set(&mapping->host->i_sb->s_wb_err, error); | |
8ed1e46a JL |
58 | |
59 | /* Record it in flags for now, for legacy callers */ | |
60 | if (error == -ENOSPC) | |
61 | set_bit(AS_ENOSPC, &mapping->flags); | |
62 | else | |
63 | set_bit(AS_EIO, &mapping->flags); | |
3e9f45bd GC |
64 | } |
65 | ||
ba9ddf49 LS |
66 | static inline void mapping_set_unevictable(struct address_space *mapping) |
67 | { | |
68 | set_bit(AS_UNEVICTABLE, &mapping->flags); | |
69 | } | |
70 | ||
89e004ea LS |
71 | static inline void mapping_clear_unevictable(struct address_space *mapping) |
72 | { | |
73 | clear_bit(AS_UNEVICTABLE, &mapping->flags); | |
74 | } | |
75 | ||
1eb6234e | 76 | static inline bool mapping_unevictable(struct address_space *mapping) |
ba9ddf49 | 77 | { |
1eb6234e | 78 | return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags); |
ba9ddf49 | 79 | } |
ba9ddf49 | 80 | |
91b0abe3 JW |
81 | static inline void mapping_set_exiting(struct address_space *mapping) |
82 | { | |
83 | set_bit(AS_EXITING, &mapping->flags); | |
84 | } | |
85 | ||
86 | static inline int mapping_exiting(struct address_space *mapping) | |
87 | { | |
88 | return test_bit(AS_EXITING, &mapping->flags); | |
89 | } | |
90 | ||
371a096e HY |
91 | static inline void mapping_set_no_writeback_tags(struct address_space *mapping) |
92 | { | |
93 | set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); | |
94 | } | |
95 | ||
96 | static inline int mapping_use_writeback_tags(struct address_space *mapping) | |
97 | { | |
98 | return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags); | |
99 | } | |
100 | ||
dd0fc66f | 101 | static inline gfp_t mapping_gfp_mask(struct address_space * mapping) |
1da177e4 | 102 | { |
9c5d760b | 103 | return mapping->gfp_mask; |
1da177e4 LT |
104 | } |
105 | ||
c62d2555 MH |
106 | /* Restricts the given gfp_mask to what the mapping allows. */ |
107 | static inline gfp_t mapping_gfp_constraint(struct address_space *mapping, | |
108 | gfp_t gfp_mask) | |
109 | { | |
110 | return mapping_gfp_mask(mapping) & gfp_mask; | |
111 | } | |
112 | ||
1da177e4 LT |
113 | /* |
114 | * This is non-atomic. Only to be used before the mapping is activated. | |
115 | * Probably needs a barrier... | |
116 | */ | |
260b2367 | 117 | static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) |
1da177e4 | 118 | { |
9c5d760b | 119 | m->gfp_mask = mask; |
1da177e4 LT |
120 | } |
121 | ||
c6f92f9f | 122 | void release_pages(struct page **pages, int nr); |
1da177e4 | 123 | |
e286781d NP |
124 | /* |
125 | * speculatively take a reference to a page. | |
0139aa7b JK |
126 | * If the page is free (_refcount == 0), then _refcount is untouched, and 0 |
127 | * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned. | |
e286781d NP |
128 | * |
129 | * This function must be called inside the same rcu_read_lock() section as has | |
130 | * been used to lookup the page in the pagecache radix-tree (or page table): | |
0139aa7b | 131 | * this allows allocators to use a synchronize_rcu() to stabilize _refcount. |
e286781d NP |
132 | * |
133 | * Unless an RCU grace period has passed, the count of all pages coming out | |
134 | * of the allocator must be considered unstable. page_count may return higher | |
135 | * than expected, and put_page must be able to do the right thing when the | |
136 | * page has been finished with, no matter what it is subsequently allocated | |
137 | * for (because put_page is what is used here to drop an invalid speculative | |
138 | * reference). | |
139 | * | |
140 | * This is the interesting part of the lockless pagecache (and lockless | |
141 | * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page) | |
142 | * has the following pattern: | |
143 | * 1. find page in radix tree | |
144 | * 2. conditionally increment refcount | |
145 | * 3. check the page is still in pagecache (if no, goto 1) | |
146 | * | |
0139aa7b | 147 | * Remove-side that cares about stability of _refcount (eg. reclaim) has the |
b93b0163 | 148 | * following (with the i_pages lock held): |
e286781d NP |
149 | * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg) |
150 | * B. remove page from pagecache | |
151 | * C. free the page | |
152 | * | |
153 | * There are 2 critical interleavings that matter: | |
154 | * - 2 runs before A: in this case, A sees elevated refcount and bails out | |
155 | * - A runs before 2: in this case, 2 sees zero refcount and retries; | |
156 | * subsequently, B will complete and 1 will find no page, causing the | |
157 | * lookup to return NULL. | |
158 | * | |
159 | * It is possible that between 1 and 2, the page is removed then the exact same | |
160 | * page is inserted into the same position in pagecache. That's OK: the | |
b93b0163 | 161 | * old find_get_page using a lock could equally have run before or after |
e286781d NP |
162 | * such a re-insertion, depending on order that locks are granted. |
163 | * | |
164 | * Lookups racing against pagecache insertion isn't a big problem: either 1 | |
165 | * will find the page or it will not. Likewise, the old find_get_page could run | |
166 | * either before the insertion or afterwards, depending on timing. | |
167 | */ | |
494eec70 | 168 | static inline int __page_cache_add_speculative(struct page *page, int count) |
e286781d | 169 | { |
8375ad98 | 170 | #ifdef CONFIG_TINY_RCU |
bdd4e85d | 171 | # ifdef CONFIG_PREEMPT_COUNT |
591a3d7c | 172 | VM_BUG_ON(!in_atomic() && !irqs_disabled()); |
e286781d NP |
173 | # endif |
174 | /* | |
175 | * Preempt must be disabled here - we rely on rcu_read_lock doing | |
176 | * this for us. | |
177 | * | |
178 | * Pagecache won't be truncated from interrupt context, so if we have | |
179 | * found a page in the radix tree here, we have pinned its refcount by | |
180 | * disabling preempt, and hence no need for the "speculative get" that | |
181 | * SMP requires. | |
182 | */ | |
309381fe | 183 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
494eec70 | 184 | page_ref_add(page, count); |
e286781d NP |
185 | |
186 | #else | |
494eec70 | 187 | if (unlikely(!page_ref_add_unless(page, count, 0))) { |
e286781d NP |
188 | /* |
189 | * Either the page has been freed, or will be freed. | |
190 | * In either case, retry here and the caller should | |
191 | * do the right thing (see comments above). | |
192 | */ | |
193 | return 0; | |
194 | } | |
195 | #endif | |
309381fe | 196 | VM_BUG_ON_PAGE(PageTail(page), page); |
e286781d NP |
197 | |
198 | return 1; | |
199 | } | |
200 | ||
494eec70 | 201 | static inline int page_cache_get_speculative(struct page *page) |
ce0ad7f0 | 202 | { |
494eec70 | 203 | return __page_cache_add_speculative(page, 1); |
204 | } | |
ce0ad7f0 | 205 | |
494eec70 | 206 | static inline int page_cache_add_speculative(struct page *page, int count) |
207 | { | |
208 | return __page_cache_add_speculative(page, count); | |
ce0ad7f0 NP |
209 | } |
210 | ||
b03143ac GJ |
211 | /** |
212 | * attach_page_private - Attach private data to a page. | |
213 | * @page: Page to attach data to. | |
214 | * @data: Data to attach to page. | |
215 | * | |
216 | * Attaching private data to a page increments the page's reference count. | |
217 | * The data must be detached before the page will be freed. | |
218 | */ | |
219 | static inline void attach_page_private(struct page *page, void *data) | |
220 | { | |
221 | get_page(page); | |
222 | set_page_private(page, (unsigned long)data); | |
223 | SetPagePrivate(page); | |
224 | } | |
225 | ||
226 | /** | |
227 | * detach_page_private - Detach private data from a page. | |
228 | * @page: Page to detach data from. | |
229 | * | |
230 | * Removes the data that was previously attached to the page and decrements | |
231 | * the refcount on the page. | |
232 | * | |
233 | * Return: Data that was attached to the page. | |
234 | */ | |
235 | static inline void *detach_page_private(struct page *page) | |
236 | { | |
237 | void *data = (void *)page_private(page); | |
238 | ||
239 | if (!PagePrivate(page)) | |
240 | return NULL; | |
241 | ClearPagePrivate(page); | |
242 | set_page_private(page, 0); | |
243 | put_page(page); | |
244 | ||
245 | return data; | |
246 | } | |
247 | ||
44110fe3 | 248 | #ifdef CONFIG_NUMA |
2ae88149 | 249 | extern struct page *__page_cache_alloc(gfp_t gfp); |
44110fe3 | 250 | #else |
2ae88149 NP |
251 | static inline struct page *__page_cache_alloc(gfp_t gfp) |
252 | { | |
253 | return alloc_pages(gfp, 0); | |
254 | } | |
255 | #endif | |
256 | ||
1da177e4 LT |
257 | static inline struct page *page_cache_alloc(struct address_space *x) |
258 | { | |
2ae88149 | 259 | return __page_cache_alloc(mapping_gfp_mask(x)); |
1da177e4 LT |
260 | } |
261 | ||
8a5c743e | 262 | static inline gfp_t readahead_gfp_mask(struct address_space *x) |
7b1de586 | 263 | { |
453f85d4 | 264 | return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN; |
7b1de586 WF |
265 | } |
266 | ||
1da177e4 LT |
267 | typedef int filler_t(void *, struct page *); |
268 | ||
0d3f9296 | 269 | pgoff_t page_cache_next_miss(struct address_space *mapping, |
e7b563bb | 270 | pgoff_t index, unsigned long max_scan); |
0d3f9296 | 271 | pgoff_t page_cache_prev_miss(struct address_space *mapping, |
e7b563bb JW |
272 | pgoff_t index, unsigned long max_scan); |
273 | ||
2457aec6 MG |
274 | #define FGP_ACCESSED 0x00000001 |
275 | #define FGP_LOCK 0x00000002 | |
276 | #define FGP_CREAT 0x00000004 | |
277 | #define FGP_WRITE 0x00000008 | |
278 | #define FGP_NOFS 0x00000010 | |
279 | #define FGP_NOWAIT 0x00000020 | |
a75d4c33 | 280 | #define FGP_FOR_MMAP 0x00000040 |
2457aec6 MG |
281 | |
282 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, | |
45f87de5 | 283 | int fgp_flags, gfp_t cache_gfp_mask); |
2457aec6 MG |
284 | |
285 | /** | |
286 | * find_get_page - find and get a page reference | |
287 | * @mapping: the address_space to search | |
288 | * @offset: the page index | |
289 | * | |
290 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
291 | * page cache page, it is returned with an increased refcount. | |
292 | * | |
293 | * Otherwise, %NULL is returned. | |
294 | */ | |
295 | static inline struct page *find_get_page(struct address_space *mapping, | |
296 | pgoff_t offset) | |
297 | { | |
45f87de5 | 298 | return pagecache_get_page(mapping, offset, 0, 0); |
2457aec6 MG |
299 | } |
300 | ||
301 | static inline struct page *find_get_page_flags(struct address_space *mapping, | |
302 | pgoff_t offset, int fgp_flags) | |
303 | { | |
45f87de5 | 304 | return pagecache_get_page(mapping, offset, fgp_flags, 0); |
2457aec6 MG |
305 | } |
306 | ||
307 | /** | |
308 | * find_lock_page - locate, pin and lock a pagecache page | |
2457aec6 MG |
309 | * @mapping: the address_space to search |
310 | * @offset: the page index | |
311 | * | |
312 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
313 | * page cache page, it is returned locked and with an increased | |
314 | * refcount. | |
315 | * | |
316 | * Otherwise, %NULL is returned. | |
317 | * | |
318 | * find_lock_page() may sleep. | |
319 | */ | |
320 | static inline struct page *find_lock_page(struct address_space *mapping, | |
321 | pgoff_t offset) | |
322 | { | |
45f87de5 | 323 | return pagecache_get_page(mapping, offset, FGP_LOCK, 0); |
2457aec6 MG |
324 | } |
325 | ||
326 | /** | |
327 | * find_or_create_page - locate or add a pagecache page | |
328 | * @mapping: the page's address_space | |
329 | * @index: the page's index into the mapping | |
330 | * @gfp_mask: page allocation mode | |
331 | * | |
332 | * Looks up the page cache slot at @mapping & @offset. If there is a | |
333 | * page cache page, it is returned locked and with an increased | |
334 | * refcount. | |
335 | * | |
336 | * If the page is not present, a new page is allocated using @gfp_mask | |
337 | * and added to the page cache and the VM's LRU list. The page is | |
338 | * returned locked and with an increased refcount. | |
339 | * | |
340 | * On memory exhaustion, %NULL is returned. | |
341 | * | |
342 | * find_or_create_page() may sleep, even if @gfp_flags specifies an | |
343 | * atomic allocation! | |
344 | */ | |
345 | static inline struct page *find_or_create_page(struct address_space *mapping, | |
767e5ee5 | 346 | pgoff_t index, gfp_t gfp_mask) |
2457aec6 | 347 | { |
767e5ee5 | 348 | return pagecache_get_page(mapping, index, |
2457aec6 | 349 | FGP_LOCK|FGP_ACCESSED|FGP_CREAT, |
45f87de5 | 350 | gfp_mask); |
2457aec6 MG |
351 | } |
352 | ||
353 | /** | |
354 | * grab_cache_page_nowait - returns locked page at given index in given cache | |
355 | * @mapping: target address_space | |
356 | * @index: the page index | |
357 | * | |
358 | * Same as grab_cache_page(), but do not wait if the page is unavailable. | |
359 | * This is intended for speculative data generators, where the data can | |
360 | * be regenerated if the page couldn't be grabbed. This routine should | |
361 | * be safe to call while holding the lock for another page. | |
362 | * | |
363 | * Clear __GFP_FS when allocating the page to avoid recursion into the fs | |
364 | * and deadlock against the caller's locked page. | |
365 | */ | |
366 | static inline struct page *grab_cache_page_nowait(struct address_space *mapping, | |
367 | pgoff_t index) | |
368 | { | |
369 | return pagecache_get_page(mapping, index, | |
370 | FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT, | |
45f87de5 | 371 | mapping_gfp_mask(mapping)); |
2457aec6 MG |
372 | } |
373 | ||
ec848215 MWO |
374 | /* |
375 | * Given the page we found in the page cache, return the page corresponding | |
376 | * to this index in the file | |
377 | */ | |
378 | static inline struct page *find_subpage(struct page *head, pgoff_t index) | |
4101196b | 379 | { |
ec848215 MWO |
380 | /* HugeTLBfs wants the head page regardless */ |
381 | if (PageHuge(head)) | |
382 | return head; | |
4101196b | 383 | |
a0650604 | 384 | return head + (index & (hpage_nr_pages(head) - 1)); |
4101196b MWO |
385 | } |
386 | ||
0cd6144a | 387 | struct page *find_get_entry(struct address_space *mapping, pgoff_t offset); |
0cd6144a | 388 | struct page *find_lock_entry(struct address_space *mapping, pgoff_t offset); |
0cd6144a JW |
389 | unsigned find_get_entries(struct address_space *mapping, pgoff_t start, |
390 | unsigned int nr_entries, struct page **entries, | |
391 | pgoff_t *indices); | |
b947cee4 JK |
392 | unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, |
393 | pgoff_t end, unsigned int nr_pages, | |
394 | struct page **pages); | |
395 | static inline unsigned find_get_pages(struct address_space *mapping, | |
396 | pgoff_t *start, unsigned int nr_pages, | |
397 | struct page **pages) | |
398 | { | |
399 | return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages, | |
400 | pages); | |
401 | } | |
ebf43500 JA |
402 | unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start, |
403 | unsigned int nr_pages, struct page **pages); | |
72b045ae | 404 | unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, |
a6906972 | 405 | pgoff_t end, xa_mark_t tag, unsigned int nr_pages, |
72b045ae JK |
406 | struct page **pages); |
407 | static inline unsigned find_get_pages_tag(struct address_space *mapping, | |
a6906972 | 408 | pgoff_t *index, xa_mark_t tag, unsigned int nr_pages, |
72b045ae JK |
409 | struct page **pages) |
410 | { | |
411 | return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag, | |
412 | nr_pages, pages); | |
413 | } | |
1da177e4 | 414 | |
54566b2c NP |
415 | struct page *grab_cache_page_write_begin(struct address_space *mapping, |
416 | pgoff_t index, unsigned flags); | |
afddba49 | 417 | |
1da177e4 LT |
418 | /* |
419 | * Returns locked page at given index in given cache, creating it if needed. | |
420 | */ | |
57f6b96c FW |
421 | static inline struct page *grab_cache_page(struct address_space *mapping, |
422 | pgoff_t index) | |
1da177e4 LT |
423 | { |
424 | return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); | |
425 | } | |
426 | ||
1da177e4 | 427 | extern struct page * read_cache_page(struct address_space *mapping, |
5e5358e7 | 428 | pgoff_t index, filler_t *filler, void *data); |
0531b2aa LT |
429 | extern struct page * read_cache_page_gfp(struct address_space *mapping, |
430 | pgoff_t index, gfp_t gfp_mask); | |
1da177e4 LT |
431 | extern int read_cache_pages(struct address_space *mapping, |
432 | struct list_head *pages, filler_t *filler, void *data); | |
433 | ||
090d2b18 | 434 | static inline struct page *read_mapping_page(struct address_space *mapping, |
5e5358e7 | 435 | pgoff_t index, void *data) |
090d2b18 | 436 | { |
6c45b454 | 437 | return read_cache_page(mapping, index, NULL, data); |
090d2b18 PE |
438 | } |
439 | ||
a0f7a756 | 440 | /* |
5cbc198a KS |
441 | * Get index of the page with in radix-tree |
442 | * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE) | |
a0f7a756 | 443 | */ |
5cbc198a | 444 | static inline pgoff_t page_to_index(struct page *page) |
a0f7a756 | 445 | { |
e9b61f19 KS |
446 | pgoff_t pgoff; |
447 | ||
e9b61f19 | 448 | if (likely(!PageTransTail(page))) |
09cbfeaf | 449 | return page->index; |
e9b61f19 KS |
450 | |
451 | /* | |
452 | * We don't initialize ->index for tail pages: calculate based on | |
453 | * head page | |
454 | */ | |
09cbfeaf | 455 | pgoff = compound_head(page)->index; |
e9b61f19 KS |
456 | pgoff += page - compound_head(page); |
457 | return pgoff; | |
a0f7a756 NH |
458 | } |
459 | ||
5cbc198a KS |
460 | /* |
461 | * Get the offset in PAGE_SIZE. | |
462 | * (TODO: hugepage should have ->index in PAGE_SIZE) | |
463 | */ | |
464 | static inline pgoff_t page_to_pgoff(struct page *page) | |
465 | { | |
466 | if (unlikely(PageHeadHuge(page))) | |
467 | return page->index << compound_order(page); | |
468 | ||
469 | return page_to_index(page); | |
470 | } | |
471 | ||
1da177e4 LT |
472 | /* |
473 | * Return byte-offset into filesystem object for page. | |
474 | */ | |
475 | static inline loff_t page_offset(struct page *page) | |
476 | { | |
09cbfeaf | 477 | return ((loff_t)page->index) << PAGE_SHIFT; |
1da177e4 LT |
478 | } |
479 | ||
f981c595 MG |
480 | static inline loff_t page_file_offset(struct page *page) |
481 | { | |
8cd79788 | 482 | return ((loff_t)page_index(page)) << PAGE_SHIFT; |
f981c595 MG |
483 | } |
484 | ||
0fe6e20b NH |
485 | extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma, |
486 | unsigned long address); | |
487 | ||
1da177e4 LT |
488 | static inline pgoff_t linear_page_index(struct vm_area_struct *vma, |
489 | unsigned long address) | |
490 | { | |
0fe6e20b NH |
491 | pgoff_t pgoff; |
492 | if (unlikely(is_vm_hugetlb_page(vma))) | |
493 | return linear_hugepage_index(vma, address); | |
494 | pgoff = (address - vma->vm_start) >> PAGE_SHIFT; | |
1da177e4 | 495 | pgoff += vma->vm_pgoff; |
09cbfeaf | 496 | return pgoff; |
1da177e4 LT |
497 | } |
498 | ||
b3c97528 HH |
499 | extern void __lock_page(struct page *page); |
500 | extern int __lock_page_killable(struct page *page); | |
d065bd81 ML |
501 | extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, |
502 | unsigned int flags); | |
b3c97528 | 503 | extern void unlock_page(struct page *page); |
1da177e4 | 504 | |
f4458845 AM |
505 | /* |
506 | * Return true if the page was successfully locked | |
507 | */ | |
529ae9aa NP |
508 | static inline int trylock_page(struct page *page) |
509 | { | |
48c935ad | 510 | page = compound_head(page); |
8413ac9d | 511 | return (likely(!test_and_set_bit_lock(PG_locked, &page->flags))); |
529ae9aa NP |
512 | } |
513 | ||
db37648c NP |
514 | /* |
515 | * lock_page may only be called if we have the page's inode pinned. | |
516 | */ | |
1da177e4 LT |
517 | static inline void lock_page(struct page *page) |
518 | { | |
519 | might_sleep(); | |
529ae9aa | 520 | if (!trylock_page(page)) |
1da177e4 LT |
521 | __lock_page(page); |
522 | } | |
db37648c | 523 | |
2687a356 MW |
524 | /* |
525 | * lock_page_killable is like lock_page but can be interrupted by fatal | |
526 | * signals. It returns 0 if it locked the page and -EINTR if it was | |
527 | * killed while waiting. | |
528 | */ | |
529 | static inline int lock_page_killable(struct page *page) | |
530 | { | |
531 | might_sleep(); | |
529ae9aa | 532 | if (!trylock_page(page)) |
2687a356 MW |
533 | return __lock_page_killable(page); |
534 | return 0; | |
535 | } | |
536 | ||
d065bd81 ML |
537 | /* |
538 | * lock_page_or_retry - Lock the page, unless this would block and the | |
539 | * caller indicated that it can handle a retry. | |
9a95f3cf | 540 | * |
c1e8d7c6 | 541 | * Return value and mmap_lock implications depend on flags; see |
9a95f3cf | 542 | * __lock_page_or_retry(). |
d065bd81 ML |
543 | */ |
544 | static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, | |
545 | unsigned int flags) | |
546 | { | |
547 | might_sleep(); | |
548 | return trylock_page(page) || __lock_page_or_retry(page, mm, flags); | |
549 | } | |
550 | ||
1da177e4 | 551 | /* |
74d81bfa NP |
552 | * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc., |
553 | * and should not be used directly. | |
1da177e4 | 554 | */ |
b3c97528 | 555 | extern void wait_on_page_bit(struct page *page, int bit_nr); |
f62e00cc | 556 | extern int wait_on_page_bit_killable(struct page *page, int bit_nr); |
a4796e37 | 557 | |
1da177e4 LT |
558 | /* |
559 | * Wait for a page to be unlocked. | |
560 | * | |
561 | * This must be called with the caller "holding" the page, | |
562 | * ie with increased "page->count" so that the page won't | |
563 | * go away during the wait.. | |
564 | */ | |
565 | static inline void wait_on_page_locked(struct page *page) | |
566 | { | |
567 | if (PageLocked(page)) | |
48c935ad | 568 | wait_on_page_bit(compound_head(page), PG_locked); |
1da177e4 LT |
569 | } |
570 | ||
62906027 NP |
571 | static inline int wait_on_page_locked_killable(struct page *page) |
572 | { | |
573 | if (!PageLocked(page)) | |
574 | return 0; | |
575 | return wait_on_page_bit_killable(compound_head(page), PG_locked); | |
576 | } | |
577 | ||
9a1ea439 HD |
578 | extern void put_and_wait_on_page_locked(struct page *page); |
579 | ||
19343b5b | 580 | void wait_on_page_writeback(struct page *page); |
1da177e4 | 581 | extern void end_page_writeback(struct page *page); |
1d1d1a76 | 582 | void wait_for_stable_page(struct page *page); |
1da177e4 | 583 | |
c11f0c0b | 584 | void page_endio(struct page *page, bool is_write, int err); |
57d99845 | 585 | |
385e1ca5 DH |
586 | /* |
587 | * Add an arbitrary waiter to a page's wait queue | |
588 | */ | |
ac6424b9 | 589 | extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter); |
385e1ca5 | 590 | |
1da177e4 | 591 | /* |
4bce9f6e | 592 | * Fault everything in given userspace address range in. |
1da177e4 LT |
593 | */ |
594 | static inline int fault_in_pages_writeable(char __user *uaddr, int size) | |
f56f821f | 595 | { |
9923777d | 596 | char __user *end = uaddr + size - 1; |
f56f821f DV |
597 | |
598 | if (unlikely(size == 0)) | |
e23d4159 | 599 | return 0; |
f56f821f | 600 | |
e23d4159 AV |
601 | if (unlikely(uaddr > end)) |
602 | return -EFAULT; | |
f56f821f DV |
603 | /* |
604 | * Writing zeroes into userspace here is OK, because we know that if | |
605 | * the zero gets there, we'll be overwriting it. | |
606 | */ | |
e23d4159 AV |
607 | do { |
608 | if (unlikely(__put_user(0, uaddr) != 0)) | |
609 | return -EFAULT; | |
f56f821f | 610 | uaddr += PAGE_SIZE; |
e23d4159 | 611 | } while (uaddr <= end); |
f56f821f DV |
612 | |
613 | /* Check whether the range spilled into the next page. */ | |
614 | if (((unsigned long)uaddr & PAGE_MASK) == | |
615 | ((unsigned long)end & PAGE_MASK)) | |
e23d4159 | 616 | return __put_user(0, end); |
f56f821f | 617 | |
e23d4159 | 618 | return 0; |
f56f821f DV |
619 | } |
620 | ||
4bce9f6e | 621 | static inline int fault_in_pages_readable(const char __user *uaddr, int size) |
f56f821f DV |
622 | { |
623 | volatile char c; | |
f56f821f DV |
624 | const char __user *end = uaddr + size - 1; |
625 | ||
626 | if (unlikely(size == 0)) | |
e23d4159 | 627 | return 0; |
f56f821f | 628 | |
e23d4159 AV |
629 | if (unlikely(uaddr > end)) |
630 | return -EFAULT; | |
631 | ||
632 | do { | |
633 | if (unlikely(__get_user(c, uaddr) != 0)) | |
634 | return -EFAULT; | |
f56f821f | 635 | uaddr += PAGE_SIZE; |
e23d4159 | 636 | } while (uaddr <= end); |
f56f821f DV |
637 | |
638 | /* Check whether the range spilled into the next page. */ | |
639 | if (((unsigned long)uaddr & PAGE_MASK) == | |
640 | ((unsigned long)end & PAGE_MASK)) { | |
e23d4159 | 641 | return __get_user(c, end); |
f56f821f DV |
642 | } |
643 | ||
90b75db6 | 644 | (void)c; |
e23d4159 | 645 | return 0; |
f56f821f DV |
646 | } |
647 | ||
529ae9aa NP |
648 | int add_to_page_cache_locked(struct page *page, struct address_space *mapping, |
649 | pgoff_t index, gfp_t gfp_mask); | |
650 | int add_to_page_cache_lru(struct page *page, struct address_space *mapping, | |
651 | pgoff_t index, gfp_t gfp_mask); | |
97cecb5a | 652 | extern void delete_from_page_cache(struct page *page); |
62cccb8c | 653 | extern void __delete_from_page_cache(struct page *page, void *shadow); |
ef6a3c63 | 654 | int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask); |
aa65c29c JK |
655 | void delete_from_page_cache_batch(struct address_space *mapping, |
656 | struct pagevec *pvec); | |
529ae9aa | 657 | |
cee9a0c4 MWO |
658 | #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) |
659 | ||
660 | void page_cache_sync_readahead(struct address_space *, struct file_ra_state *, | |
661 | struct file *, pgoff_t index, unsigned long req_count); | |
662 | void page_cache_async_readahead(struct address_space *, struct file_ra_state *, | |
663 | struct file *, struct page *, pgoff_t index, | |
664 | unsigned long req_count); | |
2c684234 MWO |
665 | void page_cache_readahead_unbounded(struct address_space *, struct file *, |
666 | pgoff_t index, unsigned long nr_to_read, | |
667 | unsigned long lookahead_count); | |
cee9a0c4 | 668 | |
529ae9aa NP |
669 | /* |
670 | * Like add_to_page_cache_locked, but used to add newly allocated pages: | |
48c935ad | 671 | * the page is new, so we can just run __SetPageLocked() against it. |
529ae9aa NP |
672 | */ |
673 | static inline int add_to_page_cache(struct page *page, | |
674 | struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask) | |
675 | { | |
676 | int error; | |
677 | ||
48c935ad | 678 | __SetPageLocked(page); |
529ae9aa NP |
679 | error = add_to_page_cache_locked(page, mapping, offset, gfp_mask); |
680 | if (unlikely(error)) | |
48c935ad | 681 | __ClearPageLocked(page); |
529ae9aa NP |
682 | return error; |
683 | } | |
684 | ||
042124cc MWO |
685 | /** |
686 | * struct readahead_control - Describes a readahead request. | |
687 | * | |
688 | * A readahead request is for consecutive pages. Filesystems which | |
689 | * implement the ->readahead method should call readahead_page() or | |
690 | * readahead_page_batch() in a loop and attempt to start I/O against | |
691 | * each page in the request. | |
692 | * | |
693 | * Most of the fields in this struct are private and should be accessed | |
694 | * by the functions below. | |
695 | * | |
696 | * @file: The file, used primarily by network filesystems for authentication. | |
697 | * May be NULL if invoked internally by the filesystem. | |
698 | * @mapping: Readahead this filesystem object. | |
699 | */ | |
700 | struct readahead_control { | |
701 | struct file *file; | |
702 | struct address_space *mapping; | |
703 | /* private: use the readahead_* accessors instead */ | |
704 | pgoff_t _index; | |
705 | unsigned int _nr_pages; | |
706 | unsigned int _batch_count; | |
707 | }; | |
708 | ||
709 | /** | |
710 | * readahead_page - Get the next page to read. | |
711 | * @rac: The current readahead request. | |
712 | * | |
713 | * Context: The page is locked and has an elevated refcount. The caller | |
714 | * should decreases the refcount once the page has been submitted for I/O | |
715 | * and unlock the page once all I/O to that page has completed. | |
716 | * Return: A pointer to the next page, or %NULL if we are done. | |
717 | */ | |
718 | static inline struct page *readahead_page(struct readahead_control *rac) | |
719 | { | |
720 | struct page *page; | |
721 | ||
722 | BUG_ON(rac->_batch_count > rac->_nr_pages); | |
723 | rac->_nr_pages -= rac->_batch_count; | |
724 | rac->_index += rac->_batch_count; | |
725 | ||
726 | if (!rac->_nr_pages) { | |
727 | rac->_batch_count = 0; | |
728 | return NULL; | |
729 | } | |
730 | ||
731 | page = xa_load(&rac->mapping->i_pages, rac->_index); | |
732 | VM_BUG_ON_PAGE(!PageLocked(page), page); | |
733 | rac->_batch_count = hpage_nr_pages(page); | |
734 | ||
735 | return page; | |
736 | } | |
737 | ||
738 | static inline unsigned int __readahead_batch(struct readahead_control *rac, | |
739 | struct page **array, unsigned int array_sz) | |
740 | { | |
741 | unsigned int i = 0; | |
742 | XA_STATE(xas, &rac->mapping->i_pages, 0); | |
743 | struct page *page; | |
744 | ||
745 | BUG_ON(rac->_batch_count > rac->_nr_pages); | |
746 | rac->_nr_pages -= rac->_batch_count; | |
747 | rac->_index += rac->_batch_count; | |
748 | rac->_batch_count = 0; | |
749 | ||
750 | xas_set(&xas, rac->_index); | |
751 | rcu_read_lock(); | |
752 | xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) { | |
753 | VM_BUG_ON_PAGE(!PageLocked(page), page); | |
754 | VM_BUG_ON_PAGE(PageTail(page), page); | |
755 | array[i++] = page; | |
756 | rac->_batch_count += hpage_nr_pages(page); | |
757 | ||
758 | /* | |
759 | * The page cache isn't using multi-index entries yet, | |
760 | * so the xas cursor needs to be manually moved to the | |
761 | * next index. This can be removed once the page cache | |
762 | * is converted. | |
763 | */ | |
764 | if (PageHead(page)) | |
765 | xas_set(&xas, rac->_index + rac->_batch_count); | |
766 | ||
767 | if (i == array_sz) | |
768 | break; | |
769 | } | |
770 | rcu_read_unlock(); | |
771 | ||
772 | return i; | |
773 | } | |
774 | ||
775 | /** | |
776 | * readahead_page_batch - Get a batch of pages to read. | |
777 | * @rac: The current readahead request. | |
778 | * @array: An array of pointers to struct page. | |
779 | * | |
780 | * Context: The pages are locked and have an elevated refcount. The caller | |
781 | * should decreases the refcount once the page has been submitted for I/O | |
782 | * and unlock the page once all I/O to that page has completed. | |
783 | * Return: The number of pages placed in the array. 0 indicates the request | |
784 | * is complete. | |
785 | */ | |
786 | #define readahead_page_batch(rac, array) \ | |
787 | __readahead_batch(rac, array, ARRAY_SIZE(array)) | |
788 | ||
789 | /** | |
790 | * readahead_pos - The byte offset into the file of this readahead request. | |
791 | * @rac: The readahead request. | |
792 | */ | |
793 | static inline loff_t readahead_pos(struct readahead_control *rac) | |
794 | { | |
795 | return (loff_t)rac->_index * PAGE_SIZE; | |
796 | } | |
797 | ||
798 | /** | |
799 | * readahead_length - The number of bytes in this readahead request. | |
800 | * @rac: The readahead request. | |
801 | */ | |
802 | static inline loff_t readahead_length(struct readahead_control *rac) | |
803 | { | |
804 | return (loff_t)rac->_nr_pages * PAGE_SIZE; | |
805 | } | |
806 | ||
807 | /** | |
808 | * readahead_index - The index of the first page in this readahead request. | |
809 | * @rac: The readahead request. | |
810 | */ | |
811 | static inline pgoff_t readahead_index(struct readahead_control *rac) | |
812 | { | |
813 | return rac->_index; | |
814 | } | |
815 | ||
816 | /** | |
817 | * readahead_count - The number of pages in this readahead request. | |
818 | * @rac: The readahead request. | |
819 | */ | |
820 | static inline unsigned int readahead_count(struct readahead_control *rac) | |
821 | { | |
822 | return rac->_nr_pages; | |
823 | } | |
824 | ||
b57c2cb9 FF |
825 | static inline unsigned long dir_pages(struct inode *inode) |
826 | { | |
09cbfeaf KS |
827 | return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >> |
828 | PAGE_SHIFT; | |
b57c2cb9 FF |
829 | } |
830 | ||
243145bc AG |
831 | /** |
832 | * page_mkwrite_check_truncate - check if page was truncated | |
833 | * @page: the page to check | |
834 | * @inode: the inode to check the page against | |
835 | * | |
836 | * Returns the number of bytes in the page up to EOF, | |
837 | * or -EFAULT if the page was truncated. | |
838 | */ | |
839 | static inline int page_mkwrite_check_truncate(struct page *page, | |
840 | struct inode *inode) | |
841 | { | |
842 | loff_t size = i_size_read(inode); | |
843 | pgoff_t index = size >> PAGE_SHIFT; | |
844 | int offset = offset_in_page(size); | |
845 | ||
846 | if (page->mapping != inode->i_mapping) | |
847 | return -EFAULT; | |
848 | ||
849 | /* page is wholly inside EOF */ | |
850 | if (page->index < index) | |
851 | return PAGE_SIZE; | |
852 | /* page is wholly past EOF */ | |
853 | if (page->index > index || !offset) | |
854 | return -EFAULT; | |
855 | /* page is partially inside EOF */ | |
856 | return offset; | |
857 | } | |
858 | ||
1da177e4 | 859 | #endif /* _LINUX_PAGEMAP_H */ |