/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGEMAP_H
#define _LINUX_PAGEMAP_H

/*
 * Copyright 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <linux/compiler.h>
#include <linux/uaccess.h>
#include <linux/gfp.h>
#include <linux/bitops.h>
#include <linux/hardirq.h>	/* for in_interrupt() */
#include <linux/hugetlb_inline.h>

struct pagevec;

static inline bool mapping_empty(struct address_space *mapping)
{
	return xa_empty(&mapping->i_pages);
}

/*
 * Bits in mapping->flags.
 */
enum mapping_flags {
	AS_EIO		= 0,	/* IO error on async write */
	AS_ENOSPC	= 1,	/* ENOSPC on async write */
	AS_MM_ALL_LOCKS	= 2,	/* under mm_take_all_locks() */
	AS_UNEVICTABLE	= 3,	/* e.g., ramdisk, SHM_LOCK */
	AS_EXITING	= 4,	/* final truncate in progress */
	/* writeback related tags are not used */
	AS_NO_WRITEBACK_TAGS = 5,
	AS_THP_SUPPORT = 6,	/* THPs supported */
};

/**
 * mapping_set_error - record a writeback error in the address_space
 * @mapping: the mapping in which an error should be set
 * @error: the error to set in the mapping
 *
 * When writeback fails in some way, we must record that error so that
 * userspace can be informed when fsync and the like are called.  We endeavor
 * to report errors on any file that was open at the time of the error.  Some
 * internal callers also need to know when writeback errors have occurred.
 *
 * When a writeback error occurs, most filesystems will want to call
 * mapping_set_error to record the error in the mapping so that it can be
 * reported when the application calls fsync(2).
 */
static inline void mapping_set_error(struct address_space *mapping, int error)
{
	if (likely(!error))
		return;

	/* Record in wb_err for checkers using errseq_t based tracking */
	__filemap_set_wb_err(mapping, error);

	/* Record it in superblock */
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err, error);

	/* Record it in flags for now, for legacy callers */
	if (error == -ENOSPC)
		set_bit(AS_ENOSPC, &mapping->flags);
	else
		set_bit(AS_EIO, &mapping->flags);
}

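/*
 * Example (illustrative sketch, not kernel code): a write-side I/O
 * completion handler recording a failure so that a later fsync(2) on any
 * open file sees it.  "my_end_write" is a hypothetical helper; the bio
 * accessors are assumed to match their usual kernel signatures.
 *
 *	static void my_end_write(struct bio *bio)
 *	{
 *		struct page *page = bio_first_page_all(bio);
 *
 *		if (bio->bi_status)
 *			mapping_set_error(page->mapping,
 *					  blk_status_to_errno(bio->bi_status));
 *		end_page_writeback(page);
 *	}
 */
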
static inline void mapping_set_unevictable(struct address_space *mapping)
{
	set_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_clear_unevictable(struct address_space *mapping)
{
	clear_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline bool mapping_unevictable(struct address_space *mapping)
{
	return mapping && test_bit(AS_UNEVICTABLE, &mapping->flags);
}

static inline void mapping_set_exiting(struct address_space *mapping)
{
	set_bit(AS_EXITING, &mapping->flags);
}

static inline int mapping_exiting(struct address_space *mapping)
{
	return test_bit(AS_EXITING, &mapping->flags);
}

static inline void mapping_set_no_writeback_tags(struct address_space *mapping)
{
	set_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline int mapping_use_writeback_tags(struct address_space *mapping)
{
	return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}

static inline gfp_t mapping_gfp_mask(struct address_space *mapping)
{
	return mapping->gfp_mask;
}

/* Restricts the given gfp_mask to what the mapping allows. */
static inline gfp_t mapping_gfp_constraint(struct address_space *mapping,
		gfp_t gfp_mask)
{
	return mapping_gfp_mask(mapping) & gfp_mask;
}

/*
 * This is non-atomic.  Only to be used before the mapping is activated.
 * Probably needs a barrier...
 */
static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
{
	m->gfp_mask = mask;
}

static inline bool mapping_thp_support(struct address_space *mapping)
{
	return test_bit(AS_THP_SUPPORT, &mapping->flags);
}

static inline int filemap_nr_thps(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	return atomic_read(&mapping->nr_thps);
#else
	return 0;
#endif
}

static inline void filemap_nr_thps_inc(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_inc(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

static inline void filemap_nr_thps_dec(struct address_space *mapping)
{
#ifdef CONFIG_READ_ONLY_THP_FOR_FS
	if (!mapping_thp_support(mapping))
		atomic_dec(&mapping->nr_thps);
#else
	WARN_ON_ONCE(1);
#endif
}

void release_pages(struct page **pages, int nr);

/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
static inline struct address_space *page_mapping_file(struct page *page)
{
	if (unlikely(PageSwapCache(page)))
		return NULL;
	return page_mapping(page);
}

/*
 * speculatively take a reference to a page.
 * If the page is free (_refcount == 0), then _refcount is untouched, and 0
 * is returned. Otherwise, _refcount is incremented by 1 and 1 is returned.
 *
 * This function must be called inside the same rcu_read_lock() section as has
 * been used to lookup the page in the pagecache radix-tree (or page table):
 * this allows allocators to use a synchronize_rcu() to stabilize _refcount.
 *
 * Unless an RCU grace period has passed, the count of all pages coming out
 * of the allocator must be considered unstable. page_count may return higher
 * than expected, and put_page must be able to do the right thing when the
 * page has been finished with, no matter what it is subsequently allocated
 * for (because put_page is what is used here to drop an invalid speculative
 * reference).
 *
 * This is the interesting part of the lockless pagecache (and lockless
 * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
 * has the following pattern:
 * 1. find page in radix tree
 * 2. conditionally increment refcount
 * 3. check the page is still in pagecache (if no, goto 1)
 *
 * Remove-side that cares about stability of _refcount (eg. reclaim) has the
 * following (with the i_pages lock held):
 * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
 * B. remove page from pagecache
 * C. free the page
 *
 * There are 2 critical interleavings that matter:
 * - 2 runs before A: in this case, A sees elevated refcount and bails out
 * - A runs before 2: in this case, 2 sees zero refcount and retries;
 *   subsequently, B will complete and 1 will find no page, causing the
 *   lookup to return NULL.
 *
 * It is possible that between 1 and 2, the page is removed then the exact same
 * page is inserted into the same position in pagecache. That's OK: the
 * old find_get_page using a lock could equally have run before or after
 * such a re-insertion, depending on order that locks are granted.
 *
 * Lookups racing against pagecache insertion isn't a big problem: either 1
 * will find the page or it will not. Likewise, the old find_get_page could run
 * either before the insertion or afterwards, depending on timing.
 */
static inline int __page_cache_add_speculative(struct page *page, int count)
{
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
	VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
	/*
	 * Preempt must be disabled here - we rely on rcu_read_lock doing
	 * this for us.
	 *
	 * Pagecache won't be truncated from interrupt context, so if we have
	 * found a page in the radix tree here, we have pinned its refcount by
	 * disabling preempt, and hence no need for the "speculative get" that
	 * SMP requires.
	 */
	VM_BUG_ON_PAGE(page_count(page) == 0, page);
	page_ref_add(page, count);

#else
	if (unlikely(!page_ref_add_unless(page, count, 0))) {
		/*
		 * Either the page has been freed, or will be freed.
		 * In either case, retry here and the caller should
		 * do the right thing (see comments above).
		 */
		return 0;
	}
#endif
	VM_BUG_ON_PAGE(PageTail(page), page);

	return 1;
}

static inline int page_cache_get_speculative(struct page *page)
{
	return __page_cache_add_speculative(page, 1);
}

static inline int page_cache_add_speculative(struct page *page, int count)
{
	return __page_cache_add_speculative(page, count);
}

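/*
 * Example (illustrative sketch of the lookup protocol documented above;
 * the real implementation lives in pagecache_get_page() and uses xas_reload()
 * for the recheck):
 *
 *	struct page *page;
 *
 *	rcu_read_lock();
 *repeat:
 *	page = xa_load(&mapping->i_pages, index);		   // step 1
 *	if (page && !xa_is_value(page)) {
 *		if (!page_cache_get_speculative(page))		   // step 2
 *			goto repeat;
 *		if (page != xa_load(&mapping->i_pages, index)) {   // step 3
 *			put_page(page);
 *			goto repeat;
 *		}
 *	}
 *	rcu_read_unlock();
 */
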
/**
 * attach_page_private - Attach private data to a page.
 * @page: Page to attach data to.
 * @data: Data to attach to page.
 *
 * Attaching private data to a page increments the page's reference count.
 * The data must be detached before the page will be freed.
 */
static inline void attach_page_private(struct page *page, void *data)
{
	get_page(page);
	set_page_private(page, (unsigned long)data);
	SetPagePrivate(page);
}

/**
 * detach_page_private - Detach private data from a page.
 * @page: Page to detach data from.
 *
 * Removes the data that was previously attached to the page and decrements
 * the refcount on the page.
 *
 * Return: Data that was attached to the page.
 */
static inline void *detach_page_private(struct page *page)
{
	void *data = (void *)page_private(page);

	if (!PagePrivate(page))
		return NULL;
	ClearPagePrivate(page);
	set_page_private(page, 0);
	put_page(page);

	return data;
}

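/*
 * Example (illustrative sketch): a filesystem hanging per-page bookkeeping
 * off page->private.  "struct my_fs_state" is a hypothetical type.
 *
 *	struct my_fs_state *state = kzalloc(sizeof(*state), GFP_NOFS);
 *
 *	if (state)
 *		attach_page_private(page, state);
 *
 *	// ... later, in the filesystem's releasepage path:
 *	kfree(detach_page_private(page));
 */
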
#ifdef CONFIG_NUMA
extern struct page *__page_cache_alloc(gfp_t gfp);
#else
static inline struct page *__page_cache_alloc(gfp_t gfp)
{
	return alloc_pages(gfp, 0);
}
#endif

static inline struct page *page_cache_alloc(struct address_space *x)
{
	return __page_cache_alloc(mapping_gfp_mask(x));
}

static inline gfp_t readahead_gfp_mask(struct address_space *x)
{
	return mapping_gfp_mask(x) | __GFP_NORETRY | __GFP_NOWARN;
}

typedef int filler_t(void *, struct page *);

pgoff_t page_cache_next_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);
pgoff_t page_cache_prev_miss(struct address_space *mapping,
			     pgoff_t index, unsigned long max_scan);

#define FGP_ACCESSED		0x00000001
#define FGP_LOCK		0x00000002
#define FGP_CREAT		0x00000004
#define FGP_WRITE		0x00000008
#define FGP_NOFS		0x00000010
#define FGP_NOWAIT		0x00000020
#define FGP_FOR_MMAP		0x00000040
#define FGP_HEAD		0x00000080
#define FGP_ENTRY		0x00000100

struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
		int fgp_flags, gfp_t cache_gfp_mask);

/**
 * find_get_page - find and get a page reference
 * @mapping: the address_space to search
 * @offset: the page index
 *
 * Looks up the page cache slot at @mapping & @offset.  If there is a
 * page cache page, it is returned with an increased refcount.
 *
 * Otherwise, %NULL is returned.
 */
static inline struct page *find_get_page(struct address_space *mapping,
					pgoff_t offset)
{
	return pagecache_get_page(mapping, offset, 0, 0);
}

static inline struct page *find_get_page_flags(struct address_space *mapping,
					pgoff_t offset, int fgp_flags)
{
	return pagecache_get_page(mapping, offset, fgp_flags, 0);
}

/**
 * find_lock_page - locate, pin and lock a pagecache page
 * @mapping: the address_space to search
 * @index: the page index
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page or %NULL if there is no page in the cache for this
 * index.
 */
static inline struct page *find_lock_page(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK, 0);
}

/**
 * find_lock_head - Locate, pin and lock a pagecache page.
 * @mapping: The address_space to search.
 * @index: The page index.
 *
 * Looks up the page cache entry at @mapping & @index.  If there is a
 * page cache page, its head page is returned locked and with an increased
 * refcount.
 *
 * Context: May sleep.
 * Return: A struct page which is !PageTail, or %NULL if there is no page
 * in the cache for this index.
 */
static inline struct page *find_lock_head(struct address_space *mapping,
					pgoff_t index)
{
	return pagecache_get_page(mapping, index, FGP_LOCK | FGP_HEAD, 0);
}

/**
 * find_or_create_page - locate or add a pagecache page
 * @mapping: the page's address_space
 * @index: the page's index into the mapping
 * @gfp_mask: page allocation mode
 *
 * Looks up the page cache slot at @mapping & @index.  If there is a
 * page cache page, it is returned locked and with an increased
 * refcount.
 *
 * If the page is not present, a new page is allocated using @gfp_mask
 * and added to the page cache and the VM's LRU list.  The page is
 * returned locked and with an increased refcount.
 *
 * On memory exhaustion, %NULL is returned.
 *
 * find_or_create_page() may sleep, even if @gfp_mask specifies an
 * atomic allocation!
 */
static inline struct page *find_or_create_page(struct address_space *mapping,
					pgoff_t index, gfp_t gfp_mask)
{
	return pagecache_get_page(mapping, index,
					FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
					gfp_mask);
}

/**
 * grab_cache_page_nowait - returns locked page at given index in given cache
 * @mapping: target address_space
 * @index: the page index
 *
 * Same as grab_cache_page(), but do not wait if the page is unavailable.
 * This is intended for speculative data generators, where the data can
 * be regenerated if the page couldn't be grabbed.  This routine should
 * be safe to call while holding the lock for another page.
 *
 * Clear __GFP_FS when allocating the page to avoid recursion into the fs
 * and deadlock against the caller's locked page.
 */
static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
				pgoff_t index)
{
	return pagecache_get_page(mapping, index,
			FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
			mapping_gfp_mask(mapping));
}

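/*
 * Example (illustrative sketch): a buffered-write path looking up the
 * target page, creating it if it is not yet cached.
 *
 *	struct page *page;
 *
 *	page = find_or_create_page(mapping, index,
 *				   mapping_gfp_constraint(mapping, GFP_KERNEL));
 *	if (!page)
 *		return -ENOMEM;
 *	// ... copy data into the locked page ...
 *	unlock_page(page);
 *	put_page(page);
 */
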
/* Does this page contain this index? */
static inline bool thp_contains(struct page *head, pgoff_t index)
{
	/* HugeTLBfs indexes the page cache in units of hpage_size */
	if (PageHuge(head))
		return head->index == index;
	return page_index(head) == (index & ~(thp_nr_pages(head) - 1UL));
}

/*
 * Given the page we found in the page cache, return the page corresponding
 * to this index in the file
 */
static inline struct page *find_subpage(struct page *head, pgoff_t index)
{
	/* HugeTLBfs wants the head page regardless */
	if (PageHuge(head))
		return head;

	return head + (index & (thp_nr_pages(head) - 1));
}

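/*
 * Example (illustrative sketch): after an FGP_HEAD lookup, map the caller's
 * index back to the exact subpage of a (possibly compound) page.
 *
 *	struct page *head = find_lock_head(mapping, index);
 *
 *	if (head) {
 *		VM_BUG_ON_PAGE(!thp_contains(head, index), head);
 *		page = find_subpage(head, index);
 *	}
 */
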
unsigned find_get_entries(struct address_space *mapping, pgoff_t start,
		pgoff_t end, struct pagevec *pvec, pgoff_t *indices);
unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
			pgoff_t end, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages(struct address_space *mapping,
			pgoff_t *start, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range(mapping, start, (pgoff_t)-1, nr_pages,
				    pages);
}
unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
			       unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
			pgoff_t end, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages);
static inline unsigned find_get_pages_tag(struct address_space *mapping,
			pgoff_t *index, xa_mark_t tag, unsigned int nr_pages,
			struct page **pages)
{
	return find_get_pages_range_tag(mapping, index, (pgoff_t)-1, tag,
					nr_pages, pages);
}

struct page *grab_cache_page_write_begin(struct address_space *mapping,
			pgoff_t index, unsigned flags);

/*
 * Returns locked page at given index in given cache, creating it if needed.
 */
static inline struct page *grab_cache_page(struct address_space *mapping,
						pgoff_t index)
{
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}

extern struct page *read_cache_page(struct address_space *mapping,
				pgoff_t index, filler_t *filler, void *data);
extern struct page *read_cache_page_gfp(struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern int read_cache_pages(struct address_space *mapping,
		struct list_head *pages, filler_t *filler, void *data);

static inline struct page *read_mapping_page(struct address_space *mapping,
				pgoff_t index, void *data)
{
	return read_cache_page(mapping, index, NULL, data);
}

/*
 * Get index of the page within the radix-tree
 * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_index(struct page *page)
{
	pgoff_t pgoff;

	if (likely(!PageTransTail(page)))
		return page->index;

	/*
	 * We don't initialize ->index for tail pages: calculate based on
	 * head page
	 */
	pgoff = compound_head(page)->index;
	pgoff += page - compound_head(page);
	return pgoff;
}

/*
 * Get the offset in PAGE_SIZE.
 * (TODO: hugepage should have ->index in PAGE_SIZE)
 */
static inline pgoff_t page_to_pgoff(struct page *page)
{
	if (unlikely(PageHeadHuge(page)))
		return page->index << compound_order(page);

	return page_to_index(page);
}

/*
 * Return byte-offset into filesystem object for page.
 */
static inline loff_t page_offset(struct page *page)
{
	return ((loff_t)page->index) << PAGE_SHIFT;
}

static inline loff_t page_file_offset(struct page *page)
{
	return ((loff_t)page_index(page)) << PAGE_SHIFT;
}

extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
				     unsigned long address);

static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
					unsigned long address)
{
	pgoff_t pgoff;
	if (unlikely(is_vm_hugetlb_page(vma)))
		return linear_hugepage_index(vma, address);
	pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	return pgoff;
}

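/*
 * Example: with 4KiB pages, a VMA mapping a file from offset 1MiB has
 * vm_pgoff == 256, so linear_page_index(vma, vma->vm_start + 8192)
 * returns 256 + 2 == 258.
 */
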
struct wait_page_key {
	struct page *page;
	int bit_nr;
	int page_match;
};

struct wait_page_queue {
	struct page *page;
	int bit_nr;
	wait_queue_entry_t wait;
};

static inline bool wake_page_match(struct wait_page_queue *wait_page,
				  struct wait_page_key *key)
{
	if (wait_page->page != key->page)
		return false;
	key->page_match = 1;

	if (wait_page->bit_nr != key->bit_nr)
		return false;

	return true;
}

extern void __lock_page(struct page *page);
extern int __lock_page_killable(struct page *page);
extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
				unsigned int flags);
extern void unlock_page(struct page *page);

/*
 * Return true if the page was successfully locked
 */
static inline int trylock_page(struct page *page)
{
	page = compound_head(page);
	return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
}

/*
 * lock_page may only be called if we have the page's inode pinned.
 */
static inline void lock_page(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		__lock_page(page);
}

/*
 * lock_page_killable is like lock_page but can be interrupted by fatal
 * signals.  It returns 0 if it locked the page and -EINTR if it was
 * killed while waiting.
 */
static inline int lock_page_killable(struct page *page)
{
	might_sleep();
	if (!trylock_page(page))
		return __lock_page_killable(page);
	return 0;
}

/*
 * lock_page_async - Lock the page, unless this would block. If the page
 * is already locked, then queue a callback when the page becomes unlocked.
 * This callback can then retry the operation.
 *
 * Returns 0 if the page is locked successfully, or -EIOCBQUEUED if the page
 * was already locked and the callback defined in 'wait' was queued.
 */
static inline int lock_page_async(struct page *page,
				  struct wait_page_queue *wait)
{
	if (!trylock_page(page))
		return __lock_page_async(page, wait);
	return 0;
}

/*
 * lock_page_or_retry - Lock the page, unless this would block and the
 * caller indicated that it can handle a retry.
 *
 * Return value and mmap_lock implications depend on flags; see
 * __lock_page_or_retry().
 */
static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm,
				     unsigned int flags)
{
	might_sleep();
	return trylock_page(page) || __lock_page_or_retry(page, mm, flags);
}

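/*
 * Example (illustrative sketch): locking a page where a fatal signal
 * should abort the wait instead of blocking the task indefinitely.
 *
 *	if (lock_page_killable(page)) {
 *		put_page(page);
 *		return -EINTR;
 *	}
 *	// ... operate on the locked page ...
 *	unlock_page(page);
 */
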
/*
 * This is exported only for wait_on_page_locked/wait_on_page_writeback, etc.,
 * and should not be used directly.
 */
extern void wait_on_page_bit(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);

/*
 * Wait for a page to be unlocked.
 *
 * This must be called with the caller "holding" the page,
 * i.e. with an increased "page->count" so that the page won't
 * go away during the wait.
 */
static inline void wait_on_page_locked(struct page *page)
{
	if (PageLocked(page))
		wait_on_page_bit(compound_head(page), PG_locked);
}

static inline int wait_on_page_locked_killable(struct page *page)
{
	if (!PageLocked(page))
		return 0;
	return wait_on_page_bit_killable(compound_head(page), PG_locked);
}

int put_and_wait_on_page_locked(struct page *page, int state);
void wait_on_page_writeback(struct page *page);
int wait_on_page_writeback_killable(struct page *page);
extern void end_page_writeback(struct page *page);
void wait_for_stable_page(struct page *page);

void page_endio(struct page *page, bool is_write, int err);

/**
 * set_page_private_2 - Set PG_private_2 on a page and take a ref
 * @page: The page.
 *
 * Set the PG_private_2 flag on a page and take the reference needed for the VM
 * to handle its lifetime correctly.  This sets the flag and takes the
 * reference unconditionally, so care must be taken not to set the flag again
 * if it's already set.
 */
static inline void set_page_private_2(struct page *page)
{
	page = compound_head(page);
	get_page(page);
	SetPagePrivate2(page);
}

void end_page_private_2(struct page *page);
void wait_on_page_private_2(struct page *page);
int wait_on_page_private_2_killable(struct page *page);

/*
 * Add an arbitrary waiter to a page's wait queue
 */
extern void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter);

/*
 * Fault in everything in the given userspace address range.
 */
static inline int fault_in_pages_writeable(char __user *uaddr, int size)
{
	char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;
	/*
	 * Writing zeroes into userspace here is OK, because we know that if
	 * the zero gets there, we'll be overwriting it.
	 */
	do {
		if (unlikely(__put_user(0, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK))
		return __put_user(0, end);

	return 0;
}

static inline int fault_in_pages_readable(const char __user *uaddr, int size)
{
	volatile char c;
	const char __user *end = uaddr + size - 1;

	if (unlikely(size == 0))
		return 0;

	if (unlikely(uaddr > end))
		return -EFAULT;

	do {
		if (unlikely(__get_user(c, uaddr) != 0))
			return -EFAULT;
		uaddr += PAGE_SIZE;
	} while (uaddr <= end);

	/* Check whether the range spilled into the next page. */
	if (((unsigned long)uaddr & PAGE_MASK) ==
			((unsigned long)end & PAGE_MASK)) {
		return __get_user(c, end);
	}

	(void)c;
	return 0;
}

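/*
 * Example (illustrative sketch): pre-faulting a user buffer before a
 * section where taking a page fault would deadlock, e.g. while holding
 * the lock on a page that the fault handler might itself need.
 *
 *	if (fault_in_pages_readable(buf, bytes))
 *		return -EFAULT;
 *	// ... copy from buf with page faults disabled, retrying from
 *	// the top on a short copy ...
 */
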
int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
				pgoff_t index, gfp_t gfp_mask);
extern void delete_from_page_cache(struct page *page);
extern void __delete_from_page_cache(struct page *page, void *shadow);
void replace_page_cache_page(struct page *old, struct page *new);
void delete_from_page_cache_batch(struct address_space *mapping,
				  struct pagevec *pvec);
loff_t mapping_seek_hole_data(struct address_space *, loff_t start, loff_t end,
		int whence);

/*
 * Like add_to_page_cache_locked, but used to add newly allocated pages:
 * the page is new, so we can just run __SetPageLocked() against it.
 */
static inline int add_to_page_cache(struct page *page,
		struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
{
	int error;

	__SetPageLocked(page);
	error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
	if (unlikely(error))
		__ClearPageLocked(page);
	return error;
}

/**
 * struct readahead_control - Describes a readahead request.
 *
 * A readahead request is for consecutive pages.  Filesystems which
 * implement the ->readahead method should call readahead_page() or
 * readahead_page_batch() in a loop and attempt to start I/O against
 * each page in the request.
 *
 * Most of the fields in this struct are private and should be accessed
 * by the functions below.
 *
 * @file: The file, used primarily by network filesystems for authentication.
 *	  May be NULL if invoked internally by the filesystem.
 * @mapping: Readahead this filesystem object.
 * @ra: File readahead state.  May be NULL.
 */
struct readahead_control {
	struct file *file;
	struct address_space *mapping;
	struct file_ra_state *ra;
/* private: use the readahead_* accessors instead */
	pgoff_t _index;
	unsigned int _nr_pages;
	unsigned int _batch_count;
};

#define DEFINE_READAHEAD(ractl, f, r, m, i)				\
	struct readahead_control ractl = {				\
		.file = f,						\
		.mapping = m,						\
		.ra = r,						\
		._index = i,						\
	}

#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)

void page_cache_ra_unbounded(struct readahead_control *,
		unsigned long nr_to_read, unsigned long lookahead_count);
void page_cache_sync_ra(struct readahead_control *, unsigned long req_count);
void page_cache_async_ra(struct readahead_control *, struct page *,
		unsigned long req_count);
void readahead_expand(struct readahead_control *ractl,
		      loff_t new_start, size_t new_len);

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
static inline
void page_cache_sync_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file, pgoff_t index,
		unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_sync_ra(&ractl, req_count);
}

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @file: Used by the filesystem for authentication.
 * @page: The page at @index which triggered the readahead call.
 * @index: Index of first page to be read.
 * @req_count: Total number of pages being read by the caller.
 *
 * page_cache_async_readahead() should be called when a page is used which
 * is marked as PageReadahead; this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
static inline
void page_cache_async_readahead(struct address_space *mapping,
		struct file_ra_state *ra, struct file *file,
		struct page *page, pgoff_t index, unsigned long req_count)
{
	DEFINE_READAHEAD(ractl, file, ra, mapping, index);
	page_cache_async_ra(&ractl, page, req_count);
}

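/*
 * Example (illustrative sketch, modelled on the generic buffered read
 * path): trigger synchronous readahead on a cache miss and asynchronous
 * readahead when the PageReadahead marker is hit.
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, file, index,
 *					  last_index - index);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		page_cache_async_readahead(mapping, ra, file, page, index,
 *					   last_index - index);
 *	}
 */
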
/**
 * readahead_page - Get the next page to read.
 * @rac: The current readahead request.
 *
 * Context: The page is locked and has an elevated refcount.  The caller
 * should decrease the refcount once the page has been submitted for I/O
 * and unlock the page once all I/O to that page has completed.
 * Return: A pointer to the next page, or %NULL if we are done.
 */
static inline struct page *readahead_page(struct readahead_control *rac)
{
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;

	if (!rac->_nr_pages) {
		rac->_batch_count = 0;
		return NULL;
	}

	page = xa_load(&rac->mapping->i_pages, rac->_index);
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	rac->_batch_count = thp_nr_pages(page);

	return page;
}

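/*
 * Example (illustrative sketch): the shape of a filesystem ->readahead
 * method.  "my_fs_read_page" is a hypothetical helper that starts the I/O
 * and is then responsible for unlocking the page and dropping the
 * reference once the I/O completes.
 *
 *	static void my_fs_readahead(struct readahead_control *rac)
 *	{
 *		struct page *page;
 *
 *		while ((page = readahead_page(rac)))
 *			my_fs_read_page(rac->file, page);
 *	}
 */
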
static inline unsigned int __readahead_batch(struct readahead_control *rac,
		struct page **array, unsigned int array_sz)
{
	unsigned int i = 0;
	XA_STATE(xas, &rac->mapping->i_pages, 0);
	struct page *page;

	BUG_ON(rac->_batch_count > rac->_nr_pages);
	rac->_nr_pages -= rac->_batch_count;
	rac->_index += rac->_batch_count;
	rac->_batch_count = 0;

	xas_set(&xas, rac->_index);
	rcu_read_lock();
	xas_for_each(&xas, page, rac->_index + rac->_nr_pages - 1) {
		if (xas_retry(&xas, page))
			continue;
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageTail(page), page);
		array[i++] = page;
		rac->_batch_count += thp_nr_pages(page);

		/*
		 * The page cache isn't using multi-index entries yet,
		 * so the xas cursor needs to be manually moved to the
		 * next index.  This can be removed once the page cache
		 * is converted.
		 */
		if (PageHead(page))
			xas_set(&xas, rac->_index + rac->_batch_count);

		if (i == array_sz)
			break;
	}
	rcu_read_unlock();

	return i;
}

/**
 * readahead_page_batch - Get a batch of pages to read.
 * @rac: The current readahead request.
 * @array: An array of pointers to struct page.
 *
 * Context: The pages are locked and have an elevated refcount.  The caller
 * should decrease the refcount once each page has been submitted for I/O
 * and unlock each page once all I/O to it has completed.
 * Return: The number of pages placed in the array.  0 indicates the request
 * is complete.
 */
#define readahead_page_batch(rac, array)				\
	__readahead_batch(rac, array, ARRAY_SIZE(array))

/**
 * readahead_pos - The byte offset into the file of this readahead request.
 * @rac: The readahead request.
 */
static inline loff_t readahead_pos(struct readahead_control *rac)
{
	return (loff_t)rac->_index * PAGE_SIZE;
}

/**
 * readahead_length - The number of bytes in this readahead request.
 * @rac: The readahead request.
 */
static inline size_t readahead_length(struct readahead_control *rac)
{
	return rac->_nr_pages * PAGE_SIZE;
}

/**
 * readahead_index - The index of the first page in this readahead request.
 * @rac: The readahead request.
 */
static inline pgoff_t readahead_index(struct readahead_control *rac)
{
	return rac->_index;
}

/**
 * readahead_count - The number of pages in this readahead request.
 * @rac: The readahead request.
 */
static inline unsigned int readahead_count(struct readahead_control *rac)
{
	return rac->_nr_pages;
}

/**
 * readahead_batch_length - The number of bytes in the current batch.
 * @rac: The readahead request.
 */
static inline size_t readahead_batch_length(struct readahead_control *rac)
{
	return rac->_batch_count * PAGE_SIZE;
}

static inline unsigned long dir_pages(struct inode *inode)
{
	return (unsigned long)(inode->i_size + PAGE_SIZE - 1) >>
			       PAGE_SHIFT;
}

/**
 * page_mkwrite_check_truncate - check if page was truncated
 * @page: the page to check
 * @inode: the inode to check the page against
 *
 * Returns the number of bytes in the page up to EOF,
 * or -EFAULT if the page was truncated.
 */
static inline int page_mkwrite_check_truncate(struct page *page,
					      struct inode *inode)
{
	loff_t size = i_size_read(inode);
	pgoff_t index = size >> PAGE_SHIFT;
	int offset = offset_in_page(size);

	if (page->mapping != inode->i_mapping)
		return -EFAULT;

	/* page is wholly inside EOF */
	if (page->index < index)
		return PAGE_SIZE;
	/* page is wholly past EOF */
	if (page->index > index || !offset)
		return -EFAULT;
	/* page is partially inside EOF */
	return offset;
}

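/*
 * Example (illustrative sketch): a ->page_mkwrite handler revalidating the
 * page against truncation after locking it.
 *
 *	lock_page(page);
 *	ret = page_mkwrite_check_truncate(page, inode);
 *	if (ret < 0) {
 *		unlock_page(page);
 *		return VM_FAULT_NOPAGE;
 *	}
 *	// Exactly "ret" bytes of this page lie within i_size; prepare
 *	// them for writing, then return VM_FAULT_LOCKED.
 */
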
/**
 * i_blocks_per_page - How many blocks fit in this page.
 * @inode: The inode which contains the blocks.
 * @page: The page (head page if the page is a THP).
 *
 * If the block size is larger than the size of this page, return zero.
 *
 * Context: The caller should hold a refcount on the page to prevent it
 * from being split.
 * Return: The number of filesystem blocks covered by this page.
 */
static inline
unsigned int i_blocks_per_page(struct inode *inode, struct page *page)
{
	return thp_size(page) >> inode->i_blkbits;
}
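
/*
 * Example: with a 4KiB PAGE_SIZE and 1KiB filesystem blocks
 * (i_blkbits == 10), i_blocks_per_page() returns 4 for an order-0 page
 * and 4 * thp_nr_pages(page) for a THP.
 */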
#endif /* _LINUX_PAGEMAP_H */