/*
 * fs/xfs/scrub/attr.c -- XFS online scrub: extended attribute checker.
 */
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 *
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
20 #include "xfs.h"
21 #include "xfs_fs.h"
22 #include "xfs_shared.h"
23 #include "xfs_format.h"
24 #include "xfs_trans_resv.h"
25 #include "xfs_mount.h"
26 #include "xfs_defer.h"
27 #include "xfs_btree.h"
28 #include "xfs_bit.h"
29 #include "xfs_log_format.h"
30 #include "xfs_trans.h"
31 #include "xfs_sb.h"
32 #include "xfs_inode.h"
33 #include "xfs_da_format.h"
34 #include "xfs_da_btree.h"
35 #include "xfs_dir2.h"
36 #include "xfs_attr.h"
37 #include "xfs_attr_leaf.h"
38 #include "scrub/xfs_scrub.h"
39 #include "scrub/scrub.h"
40 #include "scrub/common.h"
41 #include "scrub/dabtree.h"
42 #include "scrub/trace.h"
43
44 #include <linux/posix_acl_xattr.h>
45 #include <linux/xattr.h>
46
47 /* Set us up to scrub an inode's extended attributes. */
48 int
49 xfs_scrub_setup_xattr(
50 struct xfs_scrub_context *sc,
51 struct xfs_inode *ip)
52 {
53 size_t sz;
54
55 /*
56 * Allocate the buffer without the inode lock held. We need enough
57 * space to read every xattr value in the file or enough space to
58 * hold three copies of the xattr free space bitmap. (Not both at
59 * the same time.)
60 */
61 sz = max_t(size_t, XATTR_SIZE_MAX, 3 * sizeof(long) *
62 BITS_TO_LONGS(sc->mp->m_attr_geo->blksize));
63 sc->buf = kmem_zalloc_large(sz, KM_SLEEP);
64 if (!sc->buf)
65 return -ENOMEM;
66
67 return xfs_scrub_setup_inode_contents(sc, ip, 0);
68 }
69
/* Extended Attributes */

/*
 * State shared between the attr-list walk and the per-key callback:
 * embeds the list-walker context so the put_listent callback can
 * recover the scrub context via container_of().
 */
struct xfs_scrub_xattr {
	struct xfs_attr_list_context	context;	/* attr iterator state */
	struct xfs_scrub_context	*sc;		/* owning scrub; sc->buf holds the value buffer */
};
76
77 /*
78 * Check that an extended attribute key can be looked up by hash.
79 *
80 * We use the XFS attribute list iterator (i.e. xfs_attr_list_int_ilocked)
81 * to call this function for every attribute key in an inode. Once
82 * we're here, we load the attribute value to see if any errors happen,
83 * or if we get more or less data than we expected.
84 */
85 static void
86 xfs_scrub_xattr_listent(
87 struct xfs_attr_list_context *context,
88 int flags,
89 unsigned char *name,
90 int namelen,
91 int valuelen)
92 {
93 struct xfs_scrub_xattr *sx;
94 struct xfs_da_args args = { NULL };
95 int error = 0;
96
97 sx = container_of(context, struct xfs_scrub_xattr, context);
98
99 if (flags & XFS_ATTR_INCOMPLETE) {
100 /* Incomplete attr key, just mark the inode for preening. */
101 xfs_scrub_ino_set_preen(sx->sc, context->dp->i_ino, NULL);
102 return;
103 }
104
105 args.flags = ATTR_KERNOTIME;
106 if (flags & XFS_ATTR_ROOT)
107 args.flags |= ATTR_ROOT;
108 else if (flags & XFS_ATTR_SECURE)
109 args.flags |= ATTR_SECURE;
110 args.geo = context->dp->i_mount->m_attr_geo;
111 args.whichfork = XFS_ATTR_FORK;
112 args.dp = context->dp;
113 args.name = name;
114 args.namelen = namelen;
115 args.hashval = xfs_da_hashname(args.name, args.namelen);
116 args.trans = context->tp;
117 args.value = sx->sc->buf;
118 args.valuelen = XATTR_SIZE_MAX;
119
120 error = xfs_attr_get_ilocked(context->dp, &args);
121 if (error == -EEXIST)
122 error = 0;
123 if (!xfs_scrub_fblock_process_error(sx->sc, XFS_ATTR_FORK, args.blkno,
124 &error))
125 goto fail_xref;
126 if (args.valuelen != valuelen)
127 xfs_scrub_fblock_set_corrupt(sx->sc, XFS_ATTR_FORK,
128 args.blkno);
129
130 fail_xref:
131 return;
132 }
133
134 /*
135 * Mark a range [start, start+len) in this map. Returns true if the
136 * region was free, and false if there's a conflict or a problem.
137 *
138 * Within a char, the lowest bit of the char represents the byte with
139 * the smallest address
140 */
141 STATIC bool
142 xfs_scrub_xattr_set_map(
143 struct xfs_scrub_context *sc,
144 unsigned long *map,
145 unsigned int start,
146 unsigned int len)
147 {
148 unsigned int mapsize = sc->mp->m_attr_geo->blksize;
149 bool ret = true;
150
151 if (start >= mapsize)
152 return false;
153 if (start + len > mapsize) {
154 len = mapsize - start;
155 ret = false;
156 }
157
158 if (find_next_bit(map, mapsize, start) < start + len)
159 ret = false;
160 bitmap_set(map, start, len);
161
162 return ret;
163 }
164
165 /*
166 * Check the leaf freemap from the usage bitmap. Returns false if the
167 * attr freemap has problems or points to used space.
168 */
169 STATIC bool
170 xfs_scrub_xattr_check_freemap(
171 struct xfs_scrub_context *sc,
172 unsigned long *map,
173 struct xfs_attr3_icleaf_hdr *leafhdr)
174 {
175 unsigned long *freemap;
176 unsigned long *dstmap;
177 unsigned int mapsize = sc->mp->m_attr_geo->blksize;
178 int i;
179
180 /* Construct bitmap of freemap contents. */
181 freemap = (unsigned long *)sc->buf + BITS_TO_LONGS(mapsize);
182 bitmap_zero(freemap, mapsize);
183 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
184 if (!xfs_scrub_xattr_set_map(sc, freemap,
185 leafhdr->freemap[i].base,
186 leafhdr->freemap[i].size))
187 return false;
188 }
189
190 /* Look for bits that are set in freemap and are marked in use. */
191 dstmap = freemap + BITS_TO_LONGS(mapsize);
192 return bitmap_and(dstmap, freemap, map, mapsize) == 0;
193 }
194
/*
 * Check this leaf entry's relations to everything else.
 * Accumulates the number of bytes used for the name/value data
 * into *usedbytes.
 */
STATIC void
xfs_scrub_xattr_entry(
	struct xfs_scrub_da_btree	*ds,
	int				level,
	char				*buf_end,
	struct xfs_attr_leafblock	*leaf,
	struct xfs_attr3_icleaf_hdr	*leafhdr,
	unsigned long			*usedmap,
	struct xfs_attr_leaf_entry	*ent,
	int				idx,
	unsigned int			*usedbytes,
	__u32				*last_hashval)
{
	struct xfs_mount		*mp = ds->state->mp;
	char				*name_end;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote *rentry;
	unsigned int			nameidx;
	unsigned int			namesize;

	/* On-disk padding must be zeroed. */
	if (ent->pad2 != 0)
		xfs_scrub_da_set_corrupt(ds, level);

	/* Hash values in order? */
	if (be32_to_cpu(ent->hashval) < *last_hashval)
		xfs_scrub_da_set_corrupt(ds, level);
	*last_hashval = be32_to_cpu(ent->hashval);

	/* Name must live in the name/value area, within the block. */
	nameidx = be16_to_cpu(ent->nameidx);
	if (nameidx < leafhdr->firstused ||
	    nameidx >= mp->m_attr_geo->blksize) {
		xfs_scrub_da_set_corrupt(ds, level);
		return;
	}

	/* Check the name information. */
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = xfs_attr3_leaf_name_local(leaf, idx);
		namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
				be16_to_cpu(lentry->valuelen));
		name_end = (char *)lentry + namesize;
		if (lentry->namelen == 0)
			xfs_scrub_da_set_corrupt(ds, level);
	} else {
		/* Remote entries must name a valid remote value block. */
		rentry = xfs_attr3_leaf_name_remote(leaf, idx);
		namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
		name_end = (char *)rentry + namesize;
		if (rentry->namelen == 0 || rentry->valueblk == 0)
			xfs_scrub_da_set_corrupt(ds, level);
	}
	/* Name/value data must not run off the end of the block. */
	if (name_end > buf_end)
		xfs_scrub_da_set_corrupt(ds, level);

	/* Mark the name/value region used; overlap means corruption. */
	if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, nameidx, namesize))
		xfs_scrub_da_set_corrupt(ds, level);
	/* Only tally usage while the block still looks sane. */
	if (!(ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		*usedbytes += namesize;
}
257
/* Scrub an attribute leaf. */
STATIC int
xfs_scrub_xattr_block(
	struct xfs_scrub_da_btree	*ds,
	int				level)
{
	struct xfs_attr3_icleaf_hdr	leafhdr;
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_da_state_blk		*blk = &ds->state->path.blk[level];
	struct xfs_buf			*bp = blk->bp;
	xfs_dablk_t			*last_checked = ds->private;
	struct xfs_attr_leafblock	*leaf = bp->b_addr;
	struct xfs_attr_leaf_entry	*ent;
	struct xfs_attr_leaf_entry	*entries;
	unsigned long			*usedmap = ds->sc->buf;
	char				*buf_end;
	size_t				off;
	__u32				last_hashval = 0;
	unsigned int			usedbytes = 0;
	unsigned int			hdrsize;
	int				i;

	/*
	 * The record scrubber calls us once per leaf entry; only run the
	 * whole-block checks the first time we see this block.
	 */
	if (*last_checked == blk->blkno)
		return 0;
	*last_checked = blk->blkno;
	bitmap_zero(usedmap, mp->m_attr_geo->blksize);

	/* Check all the padding. */
	if (xfs_sb_version_hascrc(&ds->sc->mp->m_sb)) {
		struct xfs_attr3_leafblock	*leaf = bp->b_addr;

		if (leaf->hdr.pad1 != 0 || leaf->hdr.pad2 != 0 ||
		    leaf->hdr.info.hdr.pad != 0)
			xfs_scrub_da_set_corrupt(ds, level);
	} else {
		if (leaf->hdr.pad1 != 0 || leaf->hdr.info.pad != 0)
			xfs_scrub_da_set_corrupt(ds, level);
	}

	/* Check the leaf header */
	xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
	hdrsize = xfs_attr3_leaf_hdr_size(leaf);

	/* Usage count and the start of the name area must fit the block. */
	if (leafhdr.usedbytes > mp->m_attr_geo->blksize)
		xfs_scrub_da_set_corrupt(ds, level);
	if (leafhdr.firstused > mp->m_attr_geo->blksize)
		xfs_scrub_da_set_corrupt(ds, level);
	if (leafhdr.firstused < hdrsize)
		xfs_scrub_da_set_corrupt(ds, level);
	/* Mark the header itself as used space. */
	if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, 0, hdrsize))
		xfs_scrub_da_set_corrupt(ds, level);

	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* The entry array must not run into the name/value area. */
	entries = xfs_attr3_leaf_entryp(leaf);
	if ((char *)&entries[leafhdr.count] > (char *)leaf + leafhdr.firstused)
		xfs_scrub_da_set_corrupt(ds, level);

	/* Check each leaf entry and its name/value data. */
	buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
	for (i = 0, ent = entries; i < leafhdr.count; ent++, i++) {
		/* Mark the leaf entry itself. */
		off = (char *)ent - (char *)leaf;
		if (!xfs_scrub_xattr_set_map(ds->sc, usedmap, off,
				sizeof(xfs_attr_leaf_entry_t))) {
			xfs_scrub_da_set_corrupt(ds, level);
			goto out;
		}

		/* Check the entry and nameval. */
		xfs_scrub_xattr_entry(ds, level, buf_end, leaf, &leafhdr,
				usedmap, ent, i, &usedbytes, &last_hashval);

		if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
			goto out;
	}

	/* Free space must not overlap anything we marked as used. */
	if (!xfs_scrub_xattr_check_freemap(ds->sc, usedmap, &leafhdr))
		xfs_scrub_da_set_corrupt(ds, level);

	/* The header's usage count must match what we tallied. */
	if (leafhdr.usedbytes != usedbytes)
		xfs_scrub_da_set_corrupt(ds, level);

out:
	return 0;
}
344
/* Scrub an attribute btree record. */
STATIC int
xfs_scrub_xattr_rec(
	struct xfs_scrub_da_btree	*ds,
	int				level,
	void				*rec)
{
	struct xfs_mount		*mp = ds->state->mp;
	struct xfs_attr_leaf_entry	*ent = rec;
	struct xfs_da_state_blk		*blk;
	struct xfs_attr_leaf_name_local	*lentry;
	struct xfs_attr_leaf_name_remote	*rentry;
	struct xfs_buf			*bp;
	xfs_dahash_t			calc_hash;
	xfs_dahash_t			hash;
	int				nameidx;
	int				hdrsize;
	unsigned int			badflags;
	int				error;

	blk = &ds->state->path.blk[level];

	/* Check the whole block, if necessary. */
	error = xfs_scrub_xattr_block(ds, level);
	if (error)
		goto out;
	if (ds->sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/* Check the hash of the entry. */
	error = xfs_scrub_da_btree_hash(ds, level, &ent->hashval);
	if (error)
		goto out;

	/* Find the attr entry's location. */
	bp = blk->bp;
	hdrsize = xfs_attr3_leaf_hdr_size(bp->b_addr);
	nameidx = be16_to_cpu(ent->nameidx);
	/* Name must live past the header and inside the block. */
	if (nameidx < hdrsize || nameidx >= mp->m_attr_geo->blksize) {
		xfs_scrub_da_set_corrupt(ds, level);
		goto out;
	}

	/* Retrieve the entry and check it. */
	hash = be32_to_cpu(ent->hashval);
	/* Only the known attr flag bits may be set. */
	badflags = ~(XFS_ATTR_LOCAL | XFS_ATTR_ROOT | XFS_ATTR_SECURE |
			XFS_ATTR_INCOMPLETE);
	if ((ent->flags & badflags) != 0)
		xfs_scrub_da_set_corrupt(ds, level);
	if (ent->flags & XFS_ATTR_LOCAL) {
		lentry = (struct xfs_attr_leaf_name_local *)
				(((char *)bp->b_addr) + nameidx);
		if (lentry->namelen <= 0) {
			xfs_scrub_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(lentry->nameval, lentry->namelen);
	} else {
		rentry = (struct xfs_attr_leaf_name_remote *)
				(((char *)bp->b_addr) + nameidx);
		if (rentry->namelen <= 0) {
			xfs_scrub_da_set_corrupt(ds, level);
			goto out;
		}
		calc_hash = xfs_da_hashname(rentry->name, rentry->namelen);
	}
	/* The stored hash must match the hash of the stored name. */
	if (calc_hash != hash)
		xfs_scrub_da_set_corrupt(ds, level);

out:
	return error;
}
417
418 /* Scrub the extended attribute metadata. */
419 int
420 xfs_scrub_xattr(
421 struct xfs_scrub_context *sc)
422 {
423 struct xfs_scrub_xattr sx;
424 struct attrlist_cursor_kern cursor = { 0 };
425 xfs_dablk_t last_checked = -1U;
426 int error = 0;
427
428 if (!xfs_inode_hasattr(sc->ip))
429 return -ENOENT;
430
431 memset(&sx, 0, sizeof(sx));
432 /* Check attribute tree structure */
433 error = xfs_scrub_da_btree(sc, XFS_ATTR_FORK, xfs_scrub_xattr_rec,
434 &last_checked);
435 if (error)
436 goto out;
437
438 if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
439 goto out;
440
441 /* Check that every attr key can also be looked up by hash. */
442 sx.context.dp = sc->ip;
443 sx.context.cursor = &cursor;
444 sx.context.resynch = 1;
445 sx.context.put_listent = xfs_scrub_xattr_listent;
446 sx.context.tp = sc->tp;
447 sx.context.flags = ATTR_INCOMPLETE;
448 sx.sc = sc;
449
450 /*
451 * Look up every xattr in this file by name.
452 *
453 * Use the backend implementation of xfs_attr_list to call
454 * xfs_scrub_xattr_listent on every attribute key in this inode.
455 * In other words, we use the same iterator/callback mechanism
456 * that listattr uses to scrub extended attributes, though in our
457 * _listent function, we check the value of the attribute.
458 *
459 * The VFS only locks i_rwsem when modifying attrs, so keep all
460 * three locks held because that's the only way to ensure we're
461 * the only thread poking into the da btree. We traverse the da
462 * btree while holding a leaf buffer locked for the xattr name
463 * iteration, which doesn't really follow the usual buffer
464 * locking order.
465 */
466 error = xfs_attr_list_int_ilocked(&sx.context);
467 if (!xfs_scrub_fblock_process_error(sc, XFS_ATTR_FORK, 0, &error))
468 goto out;
469 out:
470 return error;
471 }