ext4: convert from readpages to readahead
// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
        STEP_INITIAL = 0,
        STEP_DECRYPT,
        STEP_VERITY,
        STEP_MAX,
};

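/*
 * A bio_post_read_ctx is attached (via bio->bi_private) to any read bio that
 * needs post-processing.  ->enabled_steps is a bitmask of (1 << STEP_*)
 * values and ->cur_step is the step currently being run; steps are applied
 * in enum order, i.e. decryption before verity.
 */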
struct bio_post_read_ctx {
        struct bio *bio;
        struct work_struct work;
        unsigned int cur_step;
        unsigned int enabled_steps;
};

static void __read_end_io(struct bio *bio)
{
        struct page *page;
        struct bio_vec *bv;
        struct bvec_iter_all iter_all;

        bio_for_each_segment_all(bv, bio, iter_all) {
                page = bv->bv_page;

                /* PG_error was set if any post_read step failed */
                if (bio->bi_status || PageError(page)) {
                        ClearPageUptodate(page);
                        /* will re-read again later */
                        ClearPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                unlock_page(page);
        }
        if (bio->bi_private)
                mempool_free(bio->bi_private, bio_post_read_ctx_pool);
        bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);

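/*
 * Run from the fscrypt workqueue: decrypt all pages in the bio, then hand
 * the bio on to the remaining post-read steps (verity, or final completion).
 */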
static void decrypt_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);

        fscrypt_decrypt_bio(ctx->bio);

        bio_post_read_processing(ctx);
}

static void verity_work(struct work_struct *work)
{
        struct bio_post_read_ctx *ctx =
                container_of(work, struct bio_post_read_ctx, work);
        struct bio *bio = ctx->bio;

        /*
         * fsverity_verify_bio() may call readpages() again, and although verity
         * will be disabled for that, decryption may still be needed, causing
         * another bio_post_read_ctx to be allocated.  So to guarantee that
         * mempool_alloc() never deadlocks we must free the current ctx first.
         * This is safe because verity is the last post-read step.
         */
        BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
        mempool_free(ctx, bio_post_read_ctx_pool);
        bio->bi_private = NULL;

        fsverity_verify_bio(bio);

        __read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
        /*
         * We use different work queues for decryption and for verity because
         * verity may require reading metadata pages that need decryption, and
         * we shouldn't recurse to the same workqueue.
         */
        switch (++ctx->cur_step) {
        case STEP_DECRYPT:
                if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
                        INIT_WORK(&ctx->work, decrypt_work);
                        fscrypt_enqueue_decrypt_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                /* fall-through */
        case STEP_VERITY:
                if (ctx->enabled_steps & (1 << STEP_VERITY)) {
                        INIT_WORK(&ctx->work, verity_work);
                        fsverity_enqueue_verify_work(&ctx->work);
                        return;
                }
                ctx->cur_step++;
                /* fall-through */
        default:
                __read_end_io(ctx->bio);
        }
}

static bool bio_post_read_required(struct bio *bio)
{
        return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
        if (bio_post_read_required(bio)) {
                struct bio_post_read_ctx *ctx = bio->bi_private;

                ctx->cur_step = STEP_INITIAL;
                bio_post_read_processing(ctx);
                return;
        }
        __read_end_io(bio);
}

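/*
 * Only pages within i_size of a verity file hold file data that needs to be
 * verified; pages beyond i_size (where ext4 keeps the Merkle tree) do not.
 */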
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
        return fsverity_active(inode) &&
               idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}

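/*
 * Work out which post-read steps (decryption and/or verity) the pages in this
 * bio will need and, if there are any, attach a bio_post_read_ctx describing
 * them via bio->bi_private.
 */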
static void ext4_set_bio_post_read_ctx(struct bio *bio,
                                       const struct inode *inode,
                                       pgoff_t first_idx)
{
        unsigned int post_read_steps = 0;

        if (IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode))
                post_read_steps |= 1 << STEP_DECRYPT;

        if (ext4_need_verity(inode, first_idx))
                post_read_steps |= 1 << STEP_VERITY;

        if (post_read_steps) {
                /* Due to the mempool, this never fails. */
                struct bio_post_read_ctx *ctx =
                        mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

                ctx->bio = bio;
                ctx->enabled_steps = post_read_steps;
                bio->bi_private = ctx;
        }
}

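/*
 * For verity files (or files whose verity descriptor is still being built),
 * allow reads past i_size: ext4 stores the Merkle tree beyond EOF and it is
 * read back through this same read path.  For everything else, i_size is the
 * limit.
 */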
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
        if (IS_ENABLED(CONFIG_FS_VERITY) &&
            (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
                return inode->i_sb->s_maxbytes;

        return i_size_read(inode);
}

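/*
 * Called with either a readahead_control (rac != NULL, page == NULL), in
 * which case each page is taken from @rac with readahead_page(), or with a
 * single locked @page to read (rac == NULL) for the ->readpage case.
 */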
int ext4_mpage_readpages(struct address_space *mapping,
                         struct readahead_control *rac, struct page *page)
{
        struct bio *bio = NULL;
        sector_t last_block_in_bio = 0;

        struct inode *inode = mapping->host;
        const unsigned blkbits = inode->i_blkbits;
        const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
        const unsigned blocksize = 1 << blkbits;
        sector_t block_in_file;
        sector_t last_block;
        sector_t last_block_in_file;
        sector_t blocks[MAX_BUF_PER_PAGE];
        unsigned page_block;
        struct block_device *bdev = inode->i_sb->s_bdev;
        int length;
        unsigned relative_block = 0;
        struct ext4_map_blocks map;
        unsigned int nr_pages = rac ? readahead_count(rac) : 1;

        map.m_pblk = 0;
        map.m_lblk = 0;
        map.m_len = 0;
        map.m_flags = 0;

        for (; nr_pages; nr_pages--) {
                int fully_mapped = 1;
                unsigned first_hole = blocks_per_page;

                if (rac) {
                        page = readahead_page(rac);
                        prefetchw(&page->flags);
                }

                if (page_has_buffers(page))
                        goto confused;

                block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
                last_block = block_in_file + nr_pages * blocks_per_page;
                last_block_in_file = (ext4_readpage_limit(inode) +
                                      blocksize - 1) >> blkbits;
                if (last_block > last_block_in_file)
                        last_block = last_block_in_file;
                page_block = 0;

                /*
                 * Map blocks using the previous result first.
                 */
                if ((map.m_flags & EXT4_MAP_MAPPED) &&
                    block_in_file > map.m_lblk &&
                    block_in_file < (map.m_lblk + map.m_len)) {
                        unsigned map_offset = block_in_file - map.m_lblk;
                        unsigned last = map.m_len - map_offset;

                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == last) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                }
                                if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk + map_offset +
                                                     relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }

                /*
                 * Then do more ext4_map_blocks() calls until we are
                 * done with this page.
                 */
                while (page_block < blocks_per_page) {
                        if (block_in_file < last_block) {
                                map.m_lblk = block_in_file;
                                map.m_len = last_block - block_in_file;

                                if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
                                set_error_page:
                                        SetPageError(page);
                                        zero_user_segment(page, 0,
                                                          PAGE_SIZE);
                                        unlock_page(page);
                                        goto next_page;
                                }
                        }
                        if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
                                fully_mapped = 0;
                                if (first_hole == blocks_per_page)
                                        first_hole = page_block;
                                page_block++;
                                block_in_file++;
                                continue;
                        }
                        if (first_hole != blocks_per_page)
                                goto confused;          /* hole -> non-hole */

                        /* Contiguous blocks? */
                        if (page_block && blocks[page_block-1] != map.m_pblk-1)
                                goto confused;
                        for (relative_block = 0; ; relative_block++) {
                                if (relative_block == map.m_len) {
                                        /* needed? */
                                        map.m_flags &= ~EXT4_MAP_MAPPED;
                                        break;
                                } else if (page_block == blocks_per_page)
                                        break;
                                blocks[page_block] = map.m_pblk+relative_block;
                                page_block++;
                                block_in_file++;
                        }
                }
                if (first_hole != blocks_per_page) {
                        zero_user_segment(page, first_hole << blkbits,
                                          PAGE_SIZE);
                        if (first_hole == 0) {
                                if (ext4_need_verity(inode, page->index) &&
                                    !fsverity_verify_page(page))
                                        goto set_error_page;
                                SetPageUptodate(page);
                                unlock_page(page);
                                goto next_page;
                        }
                } else if (fully_mapped) {
                        SetPageMappedToDisk(page);
                }
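                /*
                 * If the page's single block is found in cleancache, the page
                 * is already populated: mark it uptodate and take the
                 * "confused" path, which will just submit any pending bio and
                 * unlock the (now uptodate) page.
                 */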
                if (fully_mapped && blocks_per_page == 1 &&
                    !PageUptodate(page) && cleancache_get_page(page) == 0) {
                        SetPageUptodate(page);
                        goto confused;
                }

                /*
                 * This page will go to BIO.  Do we need to send this
                 * BIO off first?
                 */
                if (bio && (last_block_in_bio != blocks[0] - 1)) {
                submit_and_realloc:
                        submit_bio(bio);
                        bio = NULL;
                }
                if (bio == NULL) {
                        /*
                         * bio_alloc will _always_ be able to allocate a bio if
                         * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
                         */
                        bio = bio_alloc(GFP_KERNEL,
                                        min_t(int, nr_pages, BIO_MAX_PAGES));
                        ext4_set_bio_post_read_ctx(bio, inode, page->index);
                        bio_set_dev(bio, bdev);
                        bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
                        bio->bi_end_io = mpage_end_io;
                        bio_set_op_attrs(bio, REQ_OP_READ,
                                         rac ? REQ_RAHEAD : 0);
                }

                length = first_hole << blkbits;
                if (bio_add_page(bio, page, length, 0) < length)
                        goto submit_and_realloc;

                if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
                     (relative_block == map.m_len)) ||
                    (first_hole != blocks_per_page)) {
                        submit_bio(bio);
                        bio = NULL;
                } else
                        last_block_in_bio = blocks[blocks_per_page - 1];
                goto next_page;
        confused:
                if (bio) {
                        submit_bio(bio);
                        bio = NULL;
                }
                if (!PageUptodate(page))
                        block_read_full_page(page, ext4_get_block);
                else
                        unlock_page(page);
        next_page:
                if (rac)
                        put_page(page);
        }
        if (bio)
                submit_bio(bio);
        return 0;
}

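/*
 * Pre-allocate NUM_PREALLOC_POST_READ_CTXS contexts so that mempool_alloc()
 * in ext4_set_bio_post_read_ctx() can always make forward progress, even
 * under memory pressure.
 */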
int __init ext4_init_post_read_processing(void)
{
        bio_post_read_ctx_cache =
                kmem_cache_create("ext4_bio_post_read_ctx",
                                  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
        if (!bio_post_read_ctx_cache)
                goto fail;
        bio_post_read_ctx_pool =
                mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
                                         bio_post_read_ctx_cache);
        if (!bio_post_read_ctx_pool)
                goto fail_free_cache;
        return 0;

fail_free_cache:
        kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
        return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
        mempool_destroy(bio_post_read_ctx_pool);
        kmem_cache_destroy(bio_post_read_ctx_cache);
}