// SPDX-License-Identifier: GPL-2.0
/*
 * linux/fs/ext4/readpage.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2015, Google, Inc.
 *
 * This was originally taken from fs/mpage.c
 *
 * The ext4_mpage_readpages() function here is intended to
 * replace mpage_readahead() in the general case, not just for
 * encrypted files.  It has some limitations (see below), where it
 * will fall back to block_read_full_page(), but these limitations
 * should only be hit when page_size != block_size.
 *
 * This will allow us to attach a callback function to support ext4
 * encryption.
 *
 * If anything unusual happens, such as:
 *
 * - encountering a page which has buffers
 * - encountering a page which has a non-hole after a hole
 * - encountering a page with non-contiguous blocks
 *
 * then this code just gives up and calls the buffer_head-based read function.
 * It does handle a page which has holes at the end - that is a common case:
 * the end-of-file on blocksize < PAGE_SIZE setups.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/kdev_t.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/prefetch.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/cleancache.h>

#include "ext4.h"

#define NUM_PREALLOC_POST_READ_CTXS	128

static struct kmem_cache *bio_post_read_ctx_cache;
static mempool_t *bio_post_read_ctx_pool;

/* postprocessing steps for read bios */
enum bio_post_read_step {
	STEP_INITIAL = 0,
	STEP_DECRYPT,
	STEP_VERITY,
	STEP_MAX,
};
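
/*
 * Context attached to a read bio when postprocessing is needed.
 * ->enabled_steps is a bitmask of (1 << STEP_*) values, and ->cur_step is
 * advanced through them by bio_post_read_processing() as each step's work
 * item completes.
 */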
struct bio_post_read_ctx {
	struct bio *bio;
	struct work_struct work;
	unsigned int cur_step;
	unsigned int enabled_steps;
};
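
/*
 * Finish the pages of a fully-processed read bio: mark each page uptodate
 * unless an I/O error or a post-read step failure (PG_error) occurred,
 * unlock the pages, and free the post-read context and the bio itself.
 */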
static void __read_end_io(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bv;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bv, bio, iter_all) {
		page = bv->bv_page;

		/* PG_error was set if any post_read step failed */
		if (bio->bi_status || PageError(page)) {
			ClearPageUptodate(page);
			/* will re-read again later */
			ClearPageError(page);
		} else {
			SetPageUptodate(page);
		}
		unlock_page(page);
	}
	if (bio->bi_private)
		mempool_free(bio->bi_private, bio_post_read_ctx_pool);
	bio_put(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx);
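
/*
 * Decryption work function: decrypt the bio's pages in place, then hand
 * the context back to bio_post_read_processing() for the next step.
 */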
static void decrypt_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);

	fscrypt_decrypt_bio(ctx->bio);

	bio_post_read_processing(ctx);
}
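
/*
 * Verity work function: verify the bio's data pages against the file's
 * Merkle tree, then complete the read.
 */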
static void verity_work(struct work_struct *work)
{
	struct bio_post_read_ctx *ctx =
		container_of(work, struct bio_post_read_ctx, work);
	struct bio *bio = ctx->bio;

	/*
	 * fsverity_verify_bio() may call readpages() again, and although verity
	 * will be disabled for that, decryption may still be needed, causing
	 * another bio_post_read_ctx to be allocated.  So to guarantee that
	 * mempool_alloc() never deadlocks we must free the current ctx first.
	 * This is safe because verity is the last post-read step.
	 */
	BUILD_BUG_ON(STEP_VERITY + 1 != STEP_MAX);
	mempool_free(ctx, bio_post_read_ctx_pool);
	bio->bi_private = NULL;

	fsverity_verify_bio(bio);

	__read_end_io(bio);
}

static void bio_post_read_processing(struct bio_post_read_ctx *ctx)
{
	/*
	 * We use different work queues for decryption and for verity because
	 * verity may require reading metadata pages that need decryption, and
	 * we shouldn't recurse to the same workqueue.
	 */
	switch (++ctx->cur_step) {
	case STEP_DECRYPT:
		if (ctx->enabled_steps & (1 << STEP_DECRYPT)) {
			INIT_WORK(&ctx->work, decrypt_work);
			fscrypt_enqueue_decrypt_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	case STEP_VERITY:
		if (ctx->enabled_steps & (1 << STEP_VERITY)) {
			INIT_WORK(&ctx->work, verity_work);
			fsverity_enqueue_verify_work(&ctx->work);
			return;
		}
		ctx->cur_step++;
		fallthrough;
	default:
		__read_end_io(ctx->bio);
	}
}

static bool bio_post_read_required(struct bio *bio)
{
	return bio->bi_private && !bio->bi_status;
}

/*
 * I/O completion handler for multipage BIOs.
 *
 * The mpage code never puts partial pages into a BIO (except for end-of-file).
 * If a page does not map to a contiguous run of blocks then it simply falls
 * back to block_read_full_page().
 *
 * Why is this?  If a page's completion depends on a number of different BIOs
 * which can complete in any order (or at the same time) then determining the
 * status of that page is hard.  See end_buffer_async_read() for the details.
 * There is no point in duplicating all that complexity.
 */
static void mpage_end_io(struct bio *bio)
{
	if (bio_post_read_required(bio)) {
		struct bio_post_read_ctx *ctx = bio->bi_private;

		ctx->cur_step = STEP_INITIAL;
		bio_post_read_processing(ctx);
		return;
	}
	__read_end_io(bio);
}
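
/*
 * A page needs verity verification only if the file is verity-active and
 * the page lies within i_size; pages beyond EOF (where ext4 keeps the
 * verity metadata) are not themselves verified.
 */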
static inline bool ext4_need_verity(const struct inode *inode, pgoff_t idx)
{
	return fsverity_active(inode) &&
	       idx < DIV_ROUND_UP(inode->i_size, PAGE_SIZE);
}
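
/*
 * Work out which post-read steps (decryption, verity) this bio will need
 * and, if there are any, allocate a context from the mempool and attach
 * it via bio->bi_private.
 */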
static void ext4_set_bio_post_read_ctx(struct bio *bio,
				       const struct inode *inode,
				       pgoff_t first_idx)
{
	unsigned int post_read_steps = 0;

	if (fscrypt_inode_uses_fs_layer_crypto(inode))
		post_read_steps |= 1 << STEP_DECRYPT;

	if (ext4_need_verity(inode, first_idx))
		post_read_steps |= 1 << STEP_VERITY;

	if (post_read_steps) {
		/* Due to the mempool, this never fails. */
		struct bio_post_read_ctx *ctx =
			mempool_alloc(bio_post_read_ctx_pool, GFP_NOFS);

		ctx->bio = bio;
		ctx->enabled_steps = post_read_steps;
		bio->bi_private = ctx;
	}
}
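
/*
 * Verity files may legitimately be read past i_size (ext4 stores the
 * verity metadata there), so in that case the read limit is s_maxbytes
 * rather than i_size.
 */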
static inline loff_t ext4_readpage_limit(struct inode *inode)
{
	if (IS_ENABLED(CONFIG_FS_VERITY) &&
	    (IS_VERITY(inode) || ext4_verity_in_progress(inode)))
		return inode->i_sb->s_maxbytes;

	return i_size_read(inode);
}
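
/*
 * Read pages for an inode.  Serves both the single-page case (rac == NULL,
 * the locked page is passed in directly) and readahead (rac != NULL, pages
 * come from the readahead control), batching physically contiguous blocks
 * into as few bios as possible.
 */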
int ext4_mpage_readpages(struct inode *inode,
		struct readahead_control *rac, struct page *page)
{
	struct bio *bio = NULL;
	sector_t last_block_in_bio = 0;

	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t next_block;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t blocks[MAX_BUF_PER_PAGE];
	unsigned page_block;
	struct block_device *bdev = inode->i_sb->s_bdev;
	int length;
	unsigned relative_block = 0;
	struct ext4_map_blocks map;
	unsigned int nr_pages = rac ? readahead_count(rac) : 1;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;
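
	/*
	 * For each page: map its blocks (reusing the previous
	 * ext4_map_blocks() result when it still covers this range), then
	 * either add the page to the current bio or fall back to
	 * block_read_full_page() for the "confused" cases noted at the top
	 * of this file.
	 */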
	for (; nr_pages; nr_pages--) {
		int fully_mapped = 1;
		unsigned first_hole = blocks_per_page;

		if (rac) {
			page = readahead_page(rac);
			prefetchw(&page->flags);
		}

		if (page_has_buffers(page))
			goto confused;

		block_in_file = next_block =
			(sector_t)page->index << (PAGE_SHIFT - blkbits);
		last_block = block_in_file + nr_pages * blocks_per_page;
		last_block_in_file = (ext4_readpage_limit(inode) +
				      blocksize - 1) >> blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;
		page_block = 0;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & EXT4_MAP_MAPPED) &&
		    block_in_file > map.m_lblk &&
		    block_in_file < (map.m_lblk + map.m_len)) {
			unsigned map_offset = block_in_file - map.m_lblk;
			unsigned last = map.m_len - map_offset;

			for (relative_block = 0; ; relative_block++) {
				if (relative_block == last) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				}
				if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk + map_offset +
					relative_block;
				page_block++;
				block_in_file++;
			}
		}

		/*
		 * Then do more ext4_map_blocks() calls until we are
		 * done with this page.
		 */
		while (page_block < blocks_per_page) {
			if (block_in_file < last_block) {
				map.m_lblk = block_in_file;
				map.m_len = last_block - block_in_file;

				if (ext4_map_blocks(NULL, inode, &map, 0) < 0) {
				set_error_page:
					SetPageError(page);
					zero_user_segment(page, 0,
							  PAGE_SIZE);
					unlock_page(page);
					goto next_page;
				}
			}
			if ((map.m_flags & EXT4_MAP_MAPPED) == 0) {
				fully_mapped = 0;
				if (first_hole == blocks_per_page)
					first_hole = page_block;
				page_block++;
				block_in_file++;
				continue;
			}
			if (first_hole != blocks_per_page)
				goto confused;		/* hole -> non-hole */

			/* Contiguous blocks? */
			if (page_block && blocks[page_block-1] != map.m_pblk-1)
				goto confused;
			for (relative_block = 0; ; relative_block++) {
				if (relative_block == map.m_len) {
					/* needed? */
					map.m_flags &= ~EXT4_MAP_MAPPED;
					break;
				} else if (page_block == blocks_per_page)
					break;
				blocks[page_block] = map.m_pblk+relative_block;
				page_block++;
				block_in_file++;
			}
		}
		if (first_hole != blocks_per_page) {
			zero_user_segment(page, first_hole << blkbits,
					  PAGE_SIZE);
			if (first_hole == 0) {
				if (ext4_need_verity(inode, page->index) &&
				    !fsverity_verify_page(page))
					goto set_error_page;
				SetPageUptodate(page);
				unlock_page(page);
				goto next_page;
			}
		} else if (fully_mapped) {
			SetPageMappedToDisk(page);
		}
		if (fully_mapped && blocks_per_page == 1 &&
		    !PageUptodate(page) && cleancache_get_page(page) == 0) {
			SetPageUptodate(page);
			goto confused;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != blocks[0] - 1 ||
			    !fscrypt_mergeable_bio(bio, inode, next_block))) {
		submit_and_realloc:
			submit_bio(bio);
			bio = NULL;
		}
		if (bio == NULL) {
			/*
			 * bio_alloc will _always_ be able to allocate a bio if
			 * __GFP_DIRECT_RECLAIM is set, see bio_alloc_bioset().
			 */
			bio = bio_alloc(GFP_KERNEL, bio_max_segs(nr_pages));
			fscrypt_set_bio_crypt_ctx(bio, inode, next_block,
						  GFP_KERNEL);
			ext4_set_bio_post_read_ctx(bio, inode, page->index);
			bio_set_dev(bio, bdev);
			bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
			bio->bi_end_io = mpage_end_io;
			bio_set_op_attrs(bio, REQ_OP_READ,
					 rac ? REQ_RAHEAD : 0);
		}

		length = first_hole << blkbits;
		if (bio_add_page(bio, page, length, 0) < length)
			goto submit_and_realloc;

		if (((map.m_flags & EXT4_MAP_BOUNDARY) &&
		     (relative_block == map.m_len)) ||
		    (first_hole != blocks_per_page)) {
			submit_bio(bio);
			bio = NULL;
		} else
			last_block_in_bio = blocks[blocks_per_page - 1];
		goto next_page;
	confused:
		if (bio) {
			submit_bio(bio);
			bio = NULL;
		}
		if (!PageUptodate(page))
			block_read_full_page(page, ext4_get_block);
		else
			unlock_page(page);
	next_page:
		if (rac)
			put_page(page);
	}
	if (bio)
		submit_bio(bio);
	return 0;
}
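
/*
 * Set up the slab cache and mempool used for bio_post_read_ctx allocations.
 * The NUM_PREALLOC_POST_READ_CTXS preallocated mempool entries are what let
 * ext4_set_bio_post_read_ctx() assume mempool_alloc() never fails.
 */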
int __init ext4_init_post_read_processing(void)
{
	bio_post_read_ctx_cache =
		kmem_cache_create("ext4_bio_post_read_ctx",
				  sizeof(struct bio_post_read_ctx), 0, 0, NULL);
	if (!bio_post_read_ctx_cache)
		goto fail;
	bio_post_read_ctx_pool =
		mempool_create_slab_pool(NUM_PREALLOC_POST_READ_CTXS,
					 bio_post_read_ctx_cache);
	if (!bio_post_read_ctx_pool)
		goto fail_free_cache;
	return 0;

fail_free_cache:
	kmem_cache_destroy(bio_post_read_ctx_cache);
fail:
	return -ENOMEM;
}

void ext4_exit_post_read_processing(void)
{
	mempool_destroy(bio_post_read_ctx_pool);
	kmem_cache_destroy(bio_post_read_ctx_cache);
}