/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h> /* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
        AS_EIO          = 0,    /* IO error on async write */
        AS_ENOSPC       = 1,    /* ENOSPC on async write */
        AS_MM_ALL_LOCKS = 2,    /* under mm_take_all_locks() */
        AS_UNEVICTABLE  = 3,    /* e.g., ramdisk, SHM_LOCK */
        AS_EXITING      = 4,    /* final truncate in progress */
        /* writeback related tags are not used */
        AS_NO_WRITEBACK_TAGS = 5,
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
        if (likely(!error))
                return;

        /* Record in wb_err for checkers using errseq_t based tracking */
        __filemap_set_wb_err(mapping, error);

        /* Record it in superblock */
        if (mapping->host)
                errseq_set(&mapping->host->i_sb->s_wb_err, error);

        /* Record it in flags for now, for legacy callers */
        if (error == -ENOSPC)
                set_bit(AS_ENOSPC, &mapping->flags);
        else
                set_bit(AS_EIO, &mapping->flags);
}

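/*
 * Illustrative sketch (not part of this header's API): a filesystem's
 * writeback completion handler would typically record a failed write so
 * that a later fsync(2) on any open file sees the error.  The function
 * name my_fs_end_write is hypothetical.
 */
#if 0   /* usage sketch only */
static void my_fs_end_write(struct page *page, int err)
{
        if (err)
                mapping_set_error(page->mapping, err);
        end_page_writeback(page);
}
#endif
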
static inline void mapping_set_unevictable(struct address_space *mapping)
{
        set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
        clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
        return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
        set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
        return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
        set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
        return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
        return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
                gfp_t gfp_mask)
{
        return mapping_gfp_mask(mapping) & gfp_mask;
}

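/*
 * Illustrative sketch: a caller that wants a GFP_KERNEL allocation but
 * must also honour the mapping's restrictions (e.g. a mapping that
 * forbids __GFP_FS) would combine the two masks like this.
 */
#if 0   /* usage sketch only */
static struct page *my_fs_alloc_page(struct address_space *mapping)
{
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

        return __page_cache_alloc(gfp);
}
#endif
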
/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
        m->gfp_mask = mask;
}

void release_pages(struct page **pages, int nr);

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
        VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
        /*
         * Preempt must be disabled here - we rely on rcu_read_lock doing
         * this for us.
         *
         * Pagecache won't be truncated from interrupt context, so if we have
         * found a page in the radix tree here, we have pinned its refcount by
         * disabling preempt, and hence no need for the "speculative get" that
         * SMP requires.
         */
        VM_BUG_ON_PAGE(page_count(page) == 0, page);
        page_ref_add(page, count);

#else
        if (unlikely(!page_ref_add_unless(page, count, 0))) {
                /*
                 * Either the page has been freed, or will be freed.
                 * In either case, retry here and the caller should
                 * do the right thing (see comments above).
                 */
                return 0;
        }
#endif
        VM_BUG_ON_PAGE(PageTail(page), page);

        return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
        return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
        return __page_cache_add_speculative(page, count);
}

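/*
 * Illustrative sketch of the lookup-side 1-2-3 pattern described above,
 * in the spirit of find_get_page()'s lockless protocol.  This is a
 * simplified model, not the kernel's actual implementation: find the
 * page, conditionally take a reference, then re-check that the same
 * page is still at that index, retrying if not.
 */
#if 0   /* usage sketch only */
static struct page *my_lockless_lookup(struct address_space *mapping,
                                       pgoff_t index)
{
        struct page *page;

        rcu_read_lock();
repeat:
        page = xa_load(&mapping->i_pages, index);       /* 1. find page */
        if (page && !xa_is_value(page)) {
                if (!page_cache_get_speculative(page))  /* 2. try to get ref */
                        goto repeat;
                /* 3. has the page moved?  If so, drop the ref and retry. */
                if (unlikely(page != xa_load(&mapping->i_pages, index))) {
                        put_page(page);
                        goto repeat;
                }
        }
        rcu_read_unlock();
        return page;
}
#endif
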
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
        get_page(page);
        set_page_private(page, (unsigned long)data);
        SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
        void *data = (void *)page_private(page);

        if (!PagePrivate(page))
                return NULL;
        ClearPagePrivate(page);
        set_page_private(page, 0);
        put_page(page);

        return data;
}

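/*
 * Illustrative sketch: a filesystem pairing attach_page_private() with
 * detach_page_private(), e.g. attaching per-page state when a page is
 * first used for I/O and releasing it from ->releasepage.  The type
 * my_fs_page_state is hypothetical.
 */
#if 0   /* usage sketch only */
struct my_fs_page_state;

static void my_fs_init_page(struct page *page, struct my_fs_page_state *st)
{
        attach_page_private(page, st);  /* takes a page reference */
}

static void my_fs_release_page(struct page *page)
{
        struct my_fs_page_state *st = detach_page_private(page);

        kfree(st);                      /* reference dropped by detach */
}
#endif
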
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
        return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
        return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
        return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
                             pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED            0x00000001
#define FGP_LOCK                0x00000002
#define FGP_CREAT               0x00000004
#define FGP_WRITE               0x00000008
#define FGP_NOFS                0x00000010
#define FGP_NOWAIT              0x00000020
#define FGP_FOR_MMAP            0x00000040
#define FGP_HEAD                0x00000080

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
                int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
                                        pgoff_t offset)
{
        return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
                                        pgoff_t offset, int fgp_flags)
{
        return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
                                        pgoff_t index)
{
        return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask)
{
        return pagecache_get_page(mapping, index,
                                        FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
                                        gfp_mask);
}

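/*
 * Illustrative sketch: the typical "get me this page, creating it if
 * necessary" pattern.  The caller receives the page locked with an
 * elevated refcount and must unlock_page()/put_page() when done.
 */
#if 0   /* usage sketch only */
static int my_fs_touch_page(struct address_space *mapping, pgoff_t index)
{
        struct page *page;

        page = find_or_create_page(mapping, index,
                                   mapping_gfp_mask(mapping));
        if (!page)
                return -ENOMEM;
        /* ... operate on the locked page here ... */
        unlock_page(page);
        put_page(page);
        return 0;
}
#endif
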
/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
                                pgoff_t index)
{
        return pagecache_get_page(mapping, index,
                        FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
                        mapping_gfp_mask(mapping));
}

/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
        /* HugeTLBfs indexes the page cache in units of hpage_size */
        if (PageHuge(head))
                return head->index == index;
        return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
        /* HugeTLBfs wants the head page regardless */
        if (PageHuge(head))
                return head;

        return head + (index & (thp_nr_pages(head) - 1));
}

unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
                          unsigned int nr_entries, struct page **entries,
                          pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
                        pgoff_t end, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
                        pgoff_t *start, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
                                    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
                               unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
                        pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
                        pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
                        struct page **pages)
{
        return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
                                        nr_pages, pages);
}

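/*
 * Illustrative sketch: walking dirty pages with the tag-based lookup,
 * roughly how a simple writepages-style loop could drive
 * find_get_pages_range_tag() in fixed-size batches.  Assumes a
 * hypothetical per-page writeout helper my_fs_write_one_page() that
 * unlocks the page when I/O has been started.
 */
#if 0   /* usage sketch only */
static int my_fs_write_dirty_range(struct address_space *mapping,
                                   pgoff_t index, pgoff_t end)
{
        struct page *pages[16];
        unsigned int i, nr;

        while ((nr = find_get_pages_range_tag(mapping, &index, end,
                                              PAGECACHE_TAG_DIRTY,
                                              ARRAY_SIZE(pages), pages))) {
                for (i = 0; i < nr; i++) {
                        lock_page(pages[i]);
                        my_fs_write_one_page(pages[i]); /* unlocks the page */
                        put_page(pages[i]);
                }
        }
        return 0;
}
#endif
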
struct page *grab_cache_page_write_begin(struct address_space *mapping,
                        pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
                                                pgoff_t index)
{
        return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page * read_cache_page(struct address_space *mapping,
                                pgoff_t index, filler_t *filler, void *data);
extern struct page * read_cache_page_gfp(struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
                struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
                                pgoff_t index, void *data)
{
        return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get the index of the page within the radix tree
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
        pgoff_t pgoff;

        if (likely(!PageTransTail(page)))
                return page->index;

        /*
         * We don't initialize ->index for tail pages: calculate based on
         * head page
         */
        pgoff = compound_head(page)->index;
        pgoff += page - compound_head(page);
        return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
        if (unlikely(PageHeadHuge(page)))
                return page->index << compound_order(page);

        return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
        return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
        return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
                                     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
                                        unsigned long address)
{
        pgoff_t pgoff;
        if (unlikely(is_vm_hugetlb_page(vma)))
                return linear_hugepage_index(vma, address);
        pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        return pgoff;
}

/* This has the same layout as wait_bit_key - see fs/cachefiles/rdwr.c */
struct wait_page_key {
        struct page *page;
        int bit_nr;
        int page_match;
};

struct wait_page_queue {
        struct page *page;
        int bit_nr;
        wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
                                   struct wait_page_key *key)
{
        if (wait_page->page != key->page)
                return false;
        key->page_match = 1;

        if (wait_page->bit_nr != key->bit_nr)
                return false;

        return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
        page = compound_head(page);
        return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                __lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
        might_sleep();
        if (!trylock_page(page))
                return __lock_page_killable(page);
        return 0;
}

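/*
 * Illustrative sketch of the classic lock-and-revalidate idiom: because
 * a page can be truncated while we sleep on its lock, callers that look
 * a page up and then lock it typically re-check page->mapping before
 * using the page.
 */
#if 0   /* usage sketch only */
static struct page *my_find_lock_valid(struct address_space *mapping,
                                       pgoff_t index)
{
        struct page *page = find_get_page(mapping, index);

        if (!page)
                return NULL;
        lock_page(page);
        if (unlikely(page->mapping != mapping)) {       /* truncated? */
                unlock_page(page);
                put_page(page);
                return NULL;
        }
        return page;    /* locked, with a reference */
}
#endif
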
/*
 * lock_page_async - Lock the page, unless this would block. If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
                                  struct wait_page_queue *wait)
{
        if (!trylock_page(page))
                return __lock_page_async(page, wait);
        return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
                                     unsigned int flags)
{
        might_sleep();
        return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * ie with increased "page->count" so that the page won't
 * go away during the wait..
 */
static inline void wait_on_page_locked(struct page *page)
{
        if (PageLocked(page))
                wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
        if (!PageLocked(page))
                return 0;
        return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

extern void put_and_wait_on_page_locked(struct page *page);

void wait_on_page_writeback(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault everything in given userspace address range in.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
        char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;
        /*
         * Writing zeroes into userspace here is OK, because we know that if
         * the zero gets there, we'll be overwriting it.
         */
        do {
                if (unlikely(__put_user(0, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK))
                return __put_user(0, end);

        return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
        volatile char c;
        const char __user *end = uaddr + size - 1;

        if (unlikely(size == 0))
                return 0;

        if (unlikely(uaddr > end))
                return -EFAULT;

        do {
                if (unlikely(__get_user(c, uaddr) != 0))
                        return -EFAULT;
                uaddr += PAGE_SIZE;
        } while (uaddr <= end);

        /* Check whether the range spilled into the next page. */
        if (((unsigned long)uaddr & PAGE_MASK) ==
                        ((unsigned long)end & PAGE_MASK)) {
                return __get_user(c, end);
        }

        (void)c;
        return 0;
}

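/*
 * Illustrative sketch: the classic buffered-write deadlock avoidance.
 * A filesystem faults the source buffer in *before* taking the page
 * lock, because copying from userspace while holding the page lock
 * could otherwise fault on the very page we hold locked.  The copy
 * step is elided; my_fs_copy_from_user is hypothetical.
 */
#if 0   /* usage sketch only */
static ssize_t my_fs_copy_from_user(struct page *page,
                                    const char __user *buf, int bytes)
{
        if (unlikely(fault_in_pages_readable(buf, bytes)))
                return -EFAULT;

        lock_page(page);
        /* ... copy from buf into the locked page here ... */
        unlock_page(page);
        return bytes;
}
#endif
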
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
                                pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
void delete_from_page_cache_batch(struct address_space *mapping,
                                  struct pagevec *pvec);

#define VM_READAHEAD_PAGES      (SZ_128K / PAGE_SIZE)

void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
                struct file *, pgoff_t index, unsigned long req_count);
void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
                struct file *, struct page *, pgoff_t index,
                unsigned long req_count);
void page_cache_readahead_unbounded(struct address_space *, struct file *,
                pgoff_t index, unsigned long nr_to_read,
                unsigned long lookahead_count);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
                struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
        int error;

        __SetPageLocked(page);
        error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
        if (unlikely(error))
                __ClearPageLocked(page);
        return error;
}

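/*
 * Illustrative sketch: allocating a fresh page and inserting it into
 * the page cache.  Most callers want add_to_page_cache_lru() so the
 * new page also lands on the LRU; on success the page comes back
 * locked.  my_fs_add_new_page is hypothetical.
 */
#if 0   /* usage sketch only */
static struct page *my_fs_add_new_page(struct address_space *mapping,
                                       pgoff_t index, gfp_t gfp)
{
        struct page *page = __page_cache_alloc(gfp);

        if (!page)
                return NULL;
        if (add_to_page_cache_lru(page, mapping, index, gfp)) {
                put_page(page);         /* raced or OOM; drop our ref */
                return NULL;
        }
        return page;    /* locked, in the cache, on the LRU */
}
#endif
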
/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *        May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 */
struct readahead_control {
        struct file *file;
        struct address_space *mapping;
/* private: use the readahead_* accessors instead */
        pgoff_t _index;
        unsigned int _nr_pages;
        unsigned int _batch_count;
};

/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;

        if (!rac->_nr_pages) {
                rac->_batch_count = 0;
                return NULL;
        }

        page = xa_load(&rac->mapping->i_pages, rac->_index);
        VM_BUG_ON_PAGE(!PageLocked(page), page);
        rac->_batch_count = thp_nr_pages(page);

        return page;
}

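/*
 * Illustrative sketch: the loop shape for an address_space ->readahead
 * implementation, as the kerneldoc above describes.  Each page arrives
 * locked with an elevated refcount; the filesystem starts I/O and its
 * completion path unlocks the page and drops the reference.  The helper
 * my_fs_read_page_async() is hypothetical.
 */
#if 0   /* usage sketch only */
static void my_fs_readahead(struct readahead_control *rac)
{
        struct page *page;

        while ((page = readahead_page(rac))) {
                /* start async I/O; completion unlocks and puts the page */
                my_fs_read_page_async(rac->mapping->host, page);
        }
}
#endif
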
static inline unsigned int __readahead_batch(struct readahead_control *rac,
                struct page **array, unsigned int array_sz)
{
        unsigned int i = 0;
        XA_STATE(xas, &rac->mapping->i_pages, 0);
        struct page *page;

        BUG_ON(rac->_batch_count > rac->_nr_pages);
        rac->_nr_pages -= rac->_batch_count;
        rac->_index += rac->_batch_count;
        rac->_batch_count = 0;

        xas_set(&xas, rac->_index);
        rcu_read_lock();
        xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
                VM_BUG_ON_PAGE(!PageLocked(page), page);
                VM_BUG_ON_PAGE(PageTail(page), page);
                array[i++] = page;
                rac->_batch_count += thp_nr_pages(page);

                /*
                 * The page cache isn't using multi-index entries yet,
                 * so the xas cursor needs to be manually moved to the
                 * next index.  This can be removed once the page cache
                 * is converted.
                 */
                if (PageHead(page))
                        xas_set(&xas, rac->_index + rac->_batch_count);

                if (i == array_sz)
                        break;
        }
        rcu_read_unlock();

        return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)                                \
        __readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
        return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_length(struct readahead_control *rac)
{
        return (loff_t)rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
        return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
        return rac->_nr_pages;
}

static inline unsigned long dir_pages(struct inode *inode)
{
        return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
                               PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
                                              struct inode *inode)
{
        loff_t size = i_size_read(inode);
        pgoff_t index = size >> PAGE_SHIFT;
        int offset = offset_in_page(size);

        if (page->mapping != inode->i_mapping)
                return -EFAULT;

        /* page is wholly inside EOF */
        if (page->index < index)
                return PAGE_SIZE;
        /* page is wholly past EOF */
        if (page->index > index || !offset)
                return -EFAULT;
        /* page is partially inside EOF */
        return offset;
}

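/*
 * Illustrative sketch: a vm_operations_struct ->page_mkwrite handler
 * using page_mkwrite_check_truncate() to bail out if the page was
 * truncated while we waited for the page lock.  The handler name is
 * hypothetical; the dirtying step is elided.
 */
#if 0   /* usage sketch only */
static vm_fault_t my_fs_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        int len;

        lock_page(page);
        len = page_mkwrite_check_truncate(page, inode);
        if (len < 0) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;
        }
        /* ... mark the first 'len' bytes of the page dirty ... */
        return VM_FAULT_LOCKED; /* page stays locked */
}
#endif
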
#endif /* _LINUX_PAGEMAP_H */