// SPDX-License-Identifier: GPL-2.0
/*
 * fs/ext4/verity.c: fs-verity support for ext4
 *
 * Copyright 2019 Google LLC
 */

/*
 * Implementation of fsverity_operations for ext4.
 *
 * ext4 stores the verity metadata (Merkle tree and fsverity_descriptor) past
 * the end of the file, starting at the first 64K boundary beyond i_size. This
 * approach works because (a) verity files are readonly, and (b) pages fully
 * beyond i_size aren't visible to userspace but can be read/written internally
 * by ext4 with only some relatively small changes to ext4. This approach
 * avoids having to depend on the EA_INODE feature and on rearchitecturing
 * ext4's xattr support to support paging multi-gigabyte xattrs into memory,
 * and to support encrypting xattrs. Note that the verity metadata *must* be
 * encrypted when the file is, since it contains hashes of the plaintext data.
 *
 * Using a 64K boundary rather than a 4K one keeps things ready for
 * architectures with 64K pages, and it doesn't necessarily waste space on-disk
 * since there can be a hole between i_size and the start of the Merkle tree.
 */

#include <linux/quotaops.h>

#include "ext4.h"
#include "ext4_extents.h"
#include "ext4_jbd2.h"

static inline loff_t ext4_verity_metadata_pos(const struct inode *inode)
{
	return round_up(inode->i_size, 65536);
}

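/*
 * For example: round_up(1000, 65536) == 65536, round_up(65536, 65536) ==
 * 65536, and round_up(70000, 65536) == 131072.  So the verity metadata for a
 * 70000-byte file would begin at offset 131072, leaving a (possibly sparse)
 * gap between i_size and the start of the Merkle tree.
 */
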
/*
 * Read some verity metadata from the inode. __vfs_read() can't be used because
 * we need to read beyond i_size.
 */
static int pagecache_read(struct inode *inode, void *buf, size_t count,
			  loff_t pos)
{
	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *addr;

		page = read_mapping_page(inode->i_mapping, pos >> PAGE_SHIFT,
					 NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);

		addr = kmap_atomic(page);
		memcpy(buf, addr + offset_in_page(pos), n);
		kunmap_atomic(addr);

		put_page(page);

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

/*
 * Write some verity metadata to the inode for FS_IOC_ENABLE_VERITY.
 * kernel_write() can't be used because the file descriptor is readonly.
 */
static int pagecache_write(struct inode *inode, const void *buf, size_t count,
			   loff_t pos)
{
	if (pos + count > inode->i_sb->s_maxbytes)
		return -EFBIG;

	while (count) {
		size_t n = min_t(size_t, count,
				 PAGE_SIZE - offset_in_page(pos));
		struct page *page;
		void *fsdata;
		void *addr;
		int res;

		res = pagecache_write_begin(NULL, inode->i_mapping, pos, n, 0,
					    &page, &fsdata);
		if (res)
			return res;

		addr = kmap_atomic(page);
		memcpy(addr + offset_in_page(pos), buf, n);
		kunmap_atomic(addr);

		res = pagecache_write_end(NULL, inode->i_mapping, pos, n, n,
					  page, fsdata);
		if (res < 0)
			return res;
		if (res != n)
			return -EIO;

		buf += n;
		pos += n;
		count -= n;
	}
	return 0;
}

static int ext4_begin_enable_verity(struct file *filp)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_add() */
	handle_t *handle;
	int err;

	if (ext4_verity_in_progress(inode))
		return -EBUSY;

	/*
	 * Since the file was opened readonly, we have to initialize the jbd
	 * inode and quotas here and not rely on ->open() doing it. This must
	 * be done before evicting the inline data.
	 */

	err = ext4_inode_attach_jinode(inode);
	if (err)
		return err;

	err = dquot_initialize(inode);
	if (err)
		return err;

	err = ext4_convert_inline_data(inode);
	if (err)
		return err;

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		ext4_warning_inode(inode,
				   "verity is only allowed on extent-based files");
		return -EOPNOTSUPP;
	}

	/*
	 * ext4 uses the last allocated block to find the verity descriptor, so
	 * we must remove any other blocks past EOF which might confuse things.
	 */
	err = ext4_truncate(inode);
	if (err)
		return err;

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = ext4_orphan_add(handle, inode);
	if (err == 0)
		ext4_set_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);

	ext4_journal_stop(handle);
	return err;
}

/*
 * ext4 stores the verity descriptor beginning on the next filesystem block
 * boundary after the Merkle tree. Then, the descriptor size is stored in the
 * last 4 bytes of the last allocated filesystem block --- which is either the
 * block in which the descriptor ends, or the next block after that if there
 * weren't at least 4 bytes remaining.
 *
 * We can't simply store the descriptor in an xattr because it *must* be
 * encrypted when ext4 encryption is used, but ext4 encryption doesn't encrypt
 * xattrs. Also, if the descriptor includes a large signature blob it may be
 * too large to store in an xattr without the EA_INODE feature.
 */
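/*
 * A purely illustrative example (values assumed, not taken from any real
 * filesystem): with a 4096-byte block size, verity metadata starting at
 * offset 65536, a 100000-byte Merkle tree, and an 87-byte descriptor,
 * desc_pos = round_up(65536 + 100000, 4096) = 167936 and desc_end = 168023.
 * Then desc_size_pos = round_up(168023 + 4, 4096) - 4 = 172028, i.e. the size
 * field occupies the last 4 bytes of the block in which the descriptor ends.
 * If fewer than 4 bytes had remained in that block, the size field would
 * instead land in the last 4 bytes of the following block.
 */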
static int ext4_write_verity_descriptor(struct inode *inode, const void *desc,
					size_t desc_size, u64 merkle_tree_size)
{
	const u64 desc_pos = round_up(ext4_verity_metadata_pos(inode) +
				      merkle_tree_size, i_blocksize(inode));
	const u64 desc_end = desc_pos + desc_size;
	const __le32 desc_size_disk = cpu_to_le32(desc_size);
	const u64 desc_size_pos = round_up(desc_end + sizeof(desc_size_disk),
					   i_blocksize(inode)) -
				  sizeof(desc_size_disk);
	int err;

	err = pagecache_write(inode, desc, desc_size, desc_pos);
	if (err)
		return err;

	return pagecache_write(inode, &desc_size_disk, sizeof(desc_size_disk),
			       desc_size_pos);
}

static int ext4_end_enable_verity(struct file *filp, const void *desc,
				  size_t desc_size, u64 merkle_tree_size)
{
	struct inode *inode = file_inode(filp);
	const int credits = 2; /* superblock and inode for ext4_orphan_del() */
	handle_t *handle;
	int err = 0;
	int err2;

	if (desc != NULL) {
		/* Succeeded; write the verity descriptor. */
		err = ext4_write_verity_descriptor(inode, desc, desc_size,
						   merkle_tree_size);

		/* Write all pages before clearing VERITY_IN_PROGRESS. */
		if (!err)
			err = filemap_write_and_wait(inode->i_mapping);
	}

	/* If we failed, truncate anything we wrote past i_size. */
	if (desc == NULL || err)
		ext4_truncate(inode);

	/*
	 * We must always clean up by clearing EXT4_STATE_VERITY_IN_PROGRESS and
	 * deleting the inode from the orphan list, even if something failed.
	 * If everything succeeded, we'll also set the verity bit in the same
	 * transaction.
	 */

	ext4_clear_inode_state(inode, EXT4_STATE_VERITY_IN_PROGRESS);

	handle = ext4_journal_start(inode, EXT4_HT_INODE, credits);
	if (IS_ERR(handle)) {
		ext4_orphan_del(NULL, inode);
		return PTR_ERR(handle);
	}

	err2 = ext4_orphan_del(handle, inode);
	if (err2)
		goto out_stop;

	if (desc != NULL && !err) {
		struct ext4_iloc iloc;

		err = ext4_reserve_inode_write(handle, inode, &iloc);
		if (err)
			goto out_stop;
		ext4_set_inode_flag(inode, EXT4_INODE_VERITY);
		ext4_set_inode_flags(inode);
		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
	}
out_stop:
	ext4_journal_stop(handle);
	return err ?: err2;
}

static int ext4_get_verity_descriptor_location(struct inode *inode,
					       size_t *desc_size_ret,
					       u64 *desc_pos_ret)
{
	struct ext4_ext_path *path;
	struct ext4_extent *last_extent;
	u32 end_lblk;
	u64 desc_size_pos;
	__le32 desc_size_disk;
	u32 desc_size;
	u64 desc_pos;
	int err;

	/*
	 * Descriptor size is in last 4 bytes of last allocated block.
	 * See ext4_write_verity_descriptor().
	 */

	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
		EXT4_ERROR_INODE(inode, "verity file doesn't use extents");
		return -EFSCORRUPTED;
	}

	path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
	if (IS_ERR(path))
		return PTR_ERR(path);

	last_extent = path[path->p_depth].p_ext;
	if (!last_extent) {
		EXT4_ERROR_INODE(inode, "verity file has no extents");
		ext4_ext_drop_refs(path);
		kfree(path);
		return -EFSCORRUPTED;
	}

	end_lblk = le32_to_cpu(last_extent->ee_block) +
		   ext4_ext_get_actual_len(last_extent);
	desc_size_pos = (u64)end_lblk << inode->i_blkbits;
	ext4_ext_drop_refs(path);
	kfree(path);

	if (desc_size_pos < sizeof(desc_size_disk))
		goto bad;
	desc_size_pos -= sizeof(desc_size_disk);

	err = pagecache_read(inode, &desc_size_disk, sizeof(desc_size_disk),
			     desc_size_pos);
	if (err)
		return err;
	desc_size = le32_to_cpu(desc_size_disk);

	/*
	 * The descriptor is stored just before the desc_size_disk, but starting
	 * on a filesystem block boundary.
	 */

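	/*
	 * Continuing the illustrative (assumed) numbers from the comment above
	 * ext4_write_verity_descriptor(): desc_size_pos = 172028 and
	 * desc_size = 87, so desc_pos = round_down(172028 - 87, 4096) = 167936.
	 */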
	if (desc_size > INT_MAX || desc_size > desc_size_pos)
		goto bad;

	desc_pos = round_down(desc_size_pos - desc_size, i_blocksize(inode));
	if (desc_pos < ext4_verity_metadata_pos(inode))
		goto bad;

	*desc_size_ret = desc_size;
	*desc_pos_ret = desc_pos;
	return 0;

bad:
	EXT4_ERROR_INODE(inode, "verity file corrupted; can't find descriptor");
	return -EFSCORRUPTED;
}

static int ext4_get_verity_descriptor(struct inode *inode, void *buf,
				      size_t buf_size)
{
	size_t desc_size = 0;
	u64 desc_pos = 0;
	int err;

	err = ext4_get_verity_descriptor_location(inode, &desc_size, &desc_pos);
	if (err)
		return err;

	if (buf_size) {
		if (desc_size > buf_size)
			return -ERANGE;
		err = pagecache_read(inode, buf, desc_size, desc_pos);
		if (err)
			return err;
	}
	return desc_size;
}

/*
 * Prefetch some pages from the file's Merkle tree.
 *
 * This is basically a stripped-down version of __do_page_cache_readahead()
 * which works on pages past i_size.
 */
static void ext4_merkle_tree_readahead(struct address_space *mapping,
				       pgoff_t start_index, unsigned long count)
{
	LIST_HEAD(pages);
	unsigned int nr_pages = 0;
	struct page *page;
	pgoff_t index;
	struct blk_plug plug;

	for (index = start_index; index < start_index + count; index++) {
		page = xa_load(&mapping->i_pages, index);
		if (!page || xa_is_value(page)) {
			page = __page_cache_alloc(readahead_gfp_mask(mapping));
			if (!page)
				break;
			page->index = index;
			list_add(&page->lru, &pages);
			nr_pages++;
		}
	}
	blk_start_plug(&plug);
	ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
	blk_finish_plug(&plug);
}

static struct page *ext4_read_merkle_tree_page(struct inode *inode,
					       pgoff_t index,
					       unsigned long num_ra_pages)
{
	struct page *page;

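	/*
	 * fsverity passes a page index relative to the start of the Merkle
	 * tree; shift it to the region past i_size where ext4 keeps the
	 * verity metadata.
	 */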
	index += ext4_verity_metadata_pos(inode) >> PAGE_SHIFT;

	page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
	if (!page || !PageUptodate(page)) {
		if (page)
			put_page(page);
		else if (num_ra_pages > 1)
			ext4_merkle_tree_readahead(inode->i_mapping, index,
						   num_ra_pages);
		page = read_mapping_page(inode->i_mapping, index, NULL);
	}
	return page;
}

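/*
 * Like ext4_read_merkle_tree_page(), this takes an index relative to the
 * start of the Merkle tree; the block is written through the page cache at
 * ext4_verity_metadata_pos(inode) + (index << log_blocksize).
 */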
static int ext4_write_merkle_tree_block(struct inode *inode, const void *buf,
					u64 index, int log_blocksize)
{
	loff_t pos = ext4_verity_metadata_pos(inode) + (index << log_blocksize);

	return pagecache_write(inode, buf, 1 << log_blocksize, pos);
}

const struct fsverity_operations ext4_verityops = {
	.begin_enable_verity	= ext4_begin_enable_verity,
	.end_enable_verity	= ext4_end_enable_verity,
	.get_verity_descriptor	= ext4_get_verity_descriptor,
	.read_merkle_tree_page	= ext4_read_merkle_tree_page,
	.write_merkle_tree_block = ext4_write_merkle_tree_block,
};