/*
 * linux/fs/ext4/xattr.c
 *
 * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
 *
 * Fix by Harrison Xing <harrison@mountainviewdata.com>.
 * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
 * Extended attributes for symlinks and special files added per
 *  suggestion of Luka Renko <luka.renko@hermes.si>.
 * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
 *  Red Hat Inc.
 * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
 *  and Andreas Gruenbacher <agruen@suse.de>.
 */

/*
 * Extended attributes are stored directly in inodes (on file systems with
 * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
 * field contains the block number if an inode uses an additional block. All
 * attributes must fit in the inode and one additional block. Blocks that
 * contain the identical set of attributes may be shared among several inodes.
 * Identical blocks are detected by keeping a cache of blocks that have
 * recently been accessed.
 *
 * The attributes in inodes and on blocks have a different header; the entries
 * are stored in the same format:
 *
 *   +------------------+
 *   | header           |
 *   | entry 1          | |
 *   | entry 2          | | growing downwards
 *   | entry 3          | v
 *   | four null bytes  |
 *   | . . .            |
 *   | value 1          | ^
 *   | value 3          | | growing upwards
 *   | value 2          | |
 *   +------------------+
 *
 * The header is followed by multiple entry descriptors. In disk blocks, the
 * entry descriptors are kept sorted. In inodes, they are unsorted. The
 * attribute values are aligned to the end of the block in no specific order.
 *
 * Locking strategy
 * ----------------
 * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
 * EA blocks are only changed if they are exclusive to an inode, so
 * holding xattr_sem also means that nothing but the EA block's reference
 * count can change. Multiple writers to the same block are synchronized
 * by the buffer lock.
 */

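/*
 * For reference, the on-disk structures this file manipulates are defined
 * in xattr.h; they look roughly like the sketch below (see that header for
 * the authoritative definitions):
 *
 *	struct ext4_xattr_header {
 *		__le32	h_magic;	// EXT4_XATTR_MAGIC
 *		__le32	h_refcount;	// inodes sharing this block
 *		__le32	h_blocks;	// blocks used, always 1 here
 *		__le32	h_hash;		// hash of all attributes
 *		__le32	h_checksum;	// crc32c of the block
 *		__u32	h_reserved[3];
 *	};
 *
 *	struct ext4_xattr_entry {
 *		__u8	e_name_len;
 *		__u8	e_name_index;	// selects a handler, e.g. "user."
 *		__le16	e_value_offs;	// offset of the value in the block
 *		__le32	e_value_inum;	// EA inode holding the value, or 0
 *		__le32	e_value_size;
 *		__le32	e_hash;		// hash of name and value
 *		char	e_name[0];
 *	};
 */
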
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include "ext4_jbd2.h"
#include "ext4.h"
#include "xattr.h"
#include "acl.h"

#ifdef EXT4_XATTR_DEBUG
# define ea_idebug(inode, fmt, ...)					\
	printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",			\
	       inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)					\
	printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",			\
	       bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
#else
# define ea_idebug(inode, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
# define ea_bdebug(bh, fmt, ...)	no_printk(fmt, ##__VA_ARGS__)
#endif

static void ext4_xattr_block_cache_insert(struct mb_cache *,
					  struct buffer_head *);
static struct buffer_head *
ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
			    struct mb_cache_entry **);
static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
				    size_t value_count);
static void ext4_xattr_rehash(struct ext4_xattr_header *);

static const struct xattr_handler * const ext4_xattr_handler_map[] = {
	[EXT4_XATTR_INDEX_USER]		     = &ext4_xattr_user_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	[EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
	[EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
#endif
	[EXT4_XATTR_INDEX_TRUSTED]	     = &ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_SECURITY
	[EXT4_XATTR_INDEX_SECURITY]	     = &ext4_xattr_security_handler,
#endif
};

const struct xattr_handler *ext4_xattr_handlers[] = {
	&ext4_xattr_user_handler,
	&ext4_xattr_trusted_handler,
#ifdef CONFIG_EXT4_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
#ifdef CONFIG_EXT4_FS_SECURITY
	&ext4_xattr_security_handler,
#endif
	NULL
};

#define EA_BLOCK_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_block_cache)

#define EA_INODE_CACHE(inode)	(((struct ext4_sb_info *) \
				inode->i_sb->s_fs_info)->s_ea_inode_cache)

static int
ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
			struct inode *inode);

#ifdef CONFIG_LOCKDEP
void ext4_xattr_inode_set_class(struct inode *ea_inode)
{
	lockdep_set_subclass(&ea_inode->i_rwsem, 1);
}
#endif

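/*
 * The xattr block checksum is a crc32c, seeded with the filesystem's
 * checksum seed, computed over the 64-bit block number followed by the
 * block contents with the h_checksum field itself treated as zero.
 */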
static __le32 ext4_xattr_block_csum(struct inode *inode,
				    sector_t block_nr,
				    struct ext4_xattr_header *hdr)
{
	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
	__u32 csum;
	__le64 dsk_block_nr = cpu_to_le64(block_nr);
	__u32 dummy_csum = 0;
	int offset = offsetof(struct ext4_xattr_header, h_checksum);

	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
			   sizeof(dsk_block_nr));
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
	csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
	offset += sizeof(dummy_csum);
	csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
			   EXT4_BLOCK_SIZE(inode->i_sb) - offset);

	return cpu_to_le32(csum);
}

static int ext4_xattr_block_csum_verify(struct inode *inode,
					struct buffer_head *bh)
{
	struct ext4_xattr_header *hdr = BHDR(bh);
	int ret = 1;

	if (ext4_has_metadata_csum(inode->i_sb)) {
		lock_buffer(bh);
		ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
							bh->b_blocknr, hdr));
		unlock_buffer(bh);
	}
	return ret;
}

static void ext4_xattr_block_csum_set(struct inode *inode,
				      struct buffer_head *bh)
{
	if (ext4_has_metadata_csum(inode->i_sb))
		BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
						bh->b_blocknr, BHDR(bh));
}

static inline const struct xattr_handler *
ext4_xattr_handler(int name_index)
{
	const struct xattr_handler *handler = NULL;

	if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
		handler = ext4_xattr_handler_map[name_index];
	return handler;
}

static int
ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
			 void *value_start)
{
	struct ext4_xattr_entry *e = entry;

	/* Find the end of the names list */
	while (!IS_LAST_ENTRY(e)) {
		struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
		if ((void *)next >= end)
			return -EFSCORRUPTED;
		e = next;
	}

	/* Check the values */
	while (!IS_LAST_ENTRY(entry)) {
		if (entry->e_value_size != 0 &&
		    entry->e_value_inum == 0) {
			u16 offs = le16_to_cpu(entry->e_value_offs);
			u32 size = le32_to_cpu(entry->e_value_size);
			void *value;

			/*
			 * The value cannot overlap the names, and the value
			 * with padding cannot extend beyond 'end'. Check both
			 * the padded and unpadded sizes, since the size may
			 * overflow to 0 when adding padding.
			 */
			if (offs > end - value_start)
				return -EFSCORRUPTED;
			value = value_start + offs;
			if (value < (void *)e + sizeof(u32) ||
			    size > end - value ||
			    EXT4_XATTR_SIZE(size) > end - value)
				return -EFSCORRUPTED;
		}
		entry = EXT4_XATTR_NEXT(entry);
	}

	return 0;
}

static inline int
ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
{
	int error;

	if (buffer_verified(bh))
		return 0;

	if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
	    BHDR(bh)->h_blocks != cpu_to_le32(1))
		return -EFSCORRUPTED;
	if (!ext4_xattr_block_csum_verify(inode, bh))
		return -EFSBADCRC;
	error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
					 bh->b_data);
	if (!error)
		set_buffer_verified(bh);
	return error;
}

static int
__xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
		    void *end, const char *function, unsigned int line)
{
	int error = -EFSCORRUPTED;

	if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
	    (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
		goto errout;
	error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
errout:
	if (error)
		__ext4_error_inode(inode, function, line, 0,
				   "corrupted in-inode xattr");
	return error;
}

#define xattr_check_inode(inode, header, end) \
	__xattr_check_inode((inode), (header), (end), __func__, __LINE__)

static int
ext4_xattr_find_entry(struct ext4_xattr_entry **pentry, int name_index,
		      const char *name, int sorted)
{
	struct ext4_xattr_entry *entry;
	size_t name_len;
	int cmp = 1;

	if (name == NULL)
		return -EINVAL;
	name_len = strlen(name);
	entry = *pentry;
	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		cmp = name_index - entry->e_name_index;
		if (!cmp)
			cmp = name_len - entry->e_name_len;
		if (!cmp)
			cmp = memcmp(name, entry->e_name, name_len);
		if (cmp <= 0 && (sorted || cmp == 0))
			break;
	}
	*pentry = entry;
	return cmp ? -ENODATA : 0;
}

static u32
ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
{
	return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
}

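/*
 * EA inodes repurpose fields that are otherwise unused for this kind of
 * inode: the reference count is packed into i_ctime.tv_sec (upper 32 bits)
 * and i_version (lower 32 bits), the crc32c hash of the value is kept in
 * i_atime.tv_sec, and legacy Lustre-style EA inodes store their parent
 * inode number in i_mtime.tv_sec (see EXT4_XATTR_INODE_GET_PARENT below).
 */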
static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
{
	return ((u64)ea_inode->i_ctime.tv_sec << 32) |
	       ((u32)ea_inode->i_version);
}

static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
{
	ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
	ea_inode->i_version = (u32)ref_count;
}

static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
{
	return (u32)ea_inode->i_atime.tv_sec;
}

static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
{
	ea_inode->i_atime.tv_sec = hash;
}

/*
 * Read the EA value from an inode.
 */
static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
{
	int blocksize = 1 << ea_inode->i_blkbits;
	int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
	int tail_size = (size % blocksize) ?: blocksize;
	struct buffer_head *bhs_inline[8];
	struct buffer_head **bhs = bhs_inline;
	int i, ret;

	if (bh_count > ARRAY_SIZE(bhs_inline)) {
		bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
		if (!bhs)
			return -ENOMEM;
	}

	ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
			       true /* wait */, bhs);
	if (ret)
		goto free_bhs;

	for (i = 0; i < bh_count; i++) {
		/* There shouldn't be any holes in ea_inode. */
		if (!bhs[i]) {
			ret = -EFSCORRUPTED;
			goto put_bhs;
		}
		memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
		       i < bh_count - 1 ? blocksize : tail_size);
	}
	ret = 0;
put_bhs:
	for (i = 0; i < bh_count; i++)
		brelse(bhs[i]);
free_bhs:
	if (bhs != bhs_inline)
		kfree(bhs);
	return ret;
}

#define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)

static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
				 u32 ea_inode_hash, struct inode **ea_inode)
{
	struct inode *inode;
	int err;

	inode = ext4_iget(parent->i_sb, ea_ino);
	if (IS_ERR(inode)) {
		err = PTR_ERR(inode);
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu err=%d", ea_ino,
			   err);
		return err;
	}

	if (is_bad_inode(inode)) {
		ext4_error(parent->i_sb,
			   "error while reading EA inode %lu is_bad_inode",
			   ea_ino);
		err = -EIO;
		goto error;
	}

	if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
		ext4_error(parent->i_sb,
			   "EA inode %lu does not have EXT4_EA_INODE_FL flag",
			   ea_ino);
		err = -EINVAL;
		goto error;
	}

	ext4_xattr_inode_set_class(inode);

	/*
	 * Check whether this is an old Lustre-style xattr inode. Lustre
	 * implementation does not have hash validation, rather it has a
	 * backpointer from ea_inode to the parent inode.
	 */
	if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
	    EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
	    inode->i_generation == parent->i_generation) {
		ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
		ext4_xattr_inode_set_ref(inode, 1);
	} else {
		inode_lock(inode);
		inode->i_flags |= S_NOQUOTA;
		inode_unlock(inode);
	}

	*ea_inode = inode;
	return 0;
error:
	iput(inode);
	return err;
}

static int
ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
			       struct ext4_xattr_entry *entry, void *buffer,
			       size_t size)
{
	u32 hash;

	/* Verify stored hash matches calculated hash. */
	hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
	if (hash != ext4_xattr_inode_get_hash(ea_inode))
		return -EFSCORRUPTED;

	if (entry) {
		__le32 e_hash, tmp_data;

		/* Verify entry hash. */
		tmp_data = cpu_to_le32(hash);
		e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
					       &tmp_data, 1);
		if (e_hash != entry->e_hash)
			return -EFSCORRUPTED;
	}
	return 0;
}

/*
 * Read xattr value from the EA inode.
 */
static int
ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
		     void *buffer, size_t size)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	struct inode *ea_inode;
	int err;

	err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
				    le32_to_cpu(entry->e_hash), &ea_inode);
	if (err) {
		ea_inode = NULL;
		goto out;
	}

	if (i_size_read(ea_inode) != size) {
		ext4_warning_inode(ea_inode,
				   "ea_inode file size=%llu entry size=%zu",
				   i_size_read(ea_inode), size);
		err = -EFSCORRUPTED;
		goto out;
	}

	err = ext4_xattr_inode_read(ea_inode, buffer, size);
	if (err)
		goto out;

	if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
		err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
						     size);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "EA inode hash validation failed");
			goto out;
		}

		if (ea_inode_cache)
			mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
					ext4_xattr_inode_get_hash(ea_inode),
					ea_inode->i_ino, true /* reusable */);
	}
out:
	iput(ea_inode);
	return err;
}

static int
ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct buffer_head *bh = NULL;
	struct ext4_xattr_entry *entry;
	size_t size;
	int error;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);

	ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
		  name_index, name, buffer, (long)buffer_size);

	error = -ENODATA;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(ea_block_cache, bh);
	entry = BFIRST(bh);
	error = ext4_xattr_find_entry(&entry, name_index, name, 1);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, bh->b_data +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(bh);
	return error;
}

int
ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
		     void *buffer, size_t buffer_size)
{
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	size_t size;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return -ENODATA;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	entry = IFIRST(header);
	error = ext4_xattr_find_entry(&entry, name_index, name, 0);
	if (error)
		goto cleanup;
	size = le32_to_cpu(entry->e_value_size);
	if (buffer) {
		error = -ERANGE;
		if (size > buffer_size)
			goto cleanup;
		if (entry->e_value_inum) {
			error = ext4_xattr_inode_get(inode, entry, buffer,
						     size);
			if (error)
				goto cleanup;
		} else {
			memcpy(buffer, (void *)IFIRST(header) +
			       le16_to_cpu(entry->e_value_offs), size);
		}
	}
	error = size;

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * ext4_xattr_get()
 *
 * Copy an extended attribute into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
int
ext4_xattr_get(struct inode *inode, int name_index, const char *name,
	       void *buffer, size_t buffer_size)
{
	int error;

	if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
		return -EIO;

	if (strlen(name) > 255)
		return -ERANGE;

	down_read(&EXT4_I(inode)->xattr_sem);
	error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
				     buffer_size);
	if (error == -ENODATA)
		error = ext4_xattr_block_get(inode, name_index, name, buffer,
					     buffer_size);
	up_read(&EXT4_I(inode)->xattr_sem);
	return error;
}
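
/*
 * A typical ext4_xattr_get() caller (outside this file) first probes with
 * buffer == NULL to learn the value size, then retries with a buffer of
 * that size -- a sketch, not code from this file:
 *
 *	len = ext4_xattr_get(inode, name_index, name, NULL, 0);
 *	if (len > 0) {
 *		buf = kmalloc(len, GFP_NOFS);
 *		if (buf)
 *			len = ext4_xattr_get(inode, name_index, name,
 *					     buf, len);
 *	}
 */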

static int
ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
			char *buffer, size_t buffer_size)
{
	size_t rest = buffer_size;

	for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
		const struct xattr_handler *handler =
			ext4_xattr_handler(entry->e_name_index);

		if (handler && (!handler->list || handler->list(dentry))) {
			const char *prefix = handler->prefix ?: handler->name;
			size_t prefix_len = strlen(prefix);
			size_t size = prefix_len + entry->e_name_len + 1;

			if (buffer) {
				if (size > rest)
					return -ERANGE;
				memcpy(buffer, prefix, prefix_len);
				buffer += prefix_len;
				memcpy(buffer, entry->e_name, entry->e_name_len);
				buffer += entry->e_name_len;
				*buffer++ = 0;
			}
			rest -= size;
		}
	}
	return buffer_size - rest; /* total size */
}

static int
ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct buffer_head *bh = NULL;
	int error;

	ea_idebug(inode, "buffer=%p, buffer_size=%ld",
		  buffer, (long)buffer_size);

	error = 0;
	if (!EXT4_I(inode)->i_file_acl)
		goto cleanup;
	ea_idebug(inode, "reading block %llu",
		  (unsigned long long)EXT4_I(inode)->i_file_acl);
	bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
	error = -EIO;
	if (!bh)
		goto cleanup;
	ea_bdebug(bh, "b_count=%d, refcount=%d",
		atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
	if (ext4_xattr_check_block(inode, bh)) {
		EXT4_ERROR_INODE(inode, "bad block %llu",
				 EXT4_I(inode)->i_file_acl);
		error = -EFSCORRUPTED;
		goto cleanup;
	}
	ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
	error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer, buffer_size);

cleanup:
	brelse(bh);

	return error;
}

static int
ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	struct inode *inode = d_inode(dentry);
	struct ext4_xattr_ibody_header *header;
	struct ext4_inode *raw_inode;
	struct ext4_iloc iloc;
	void *end;
	int error;

	if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
		return 0;
	error = ext4_get_inode_loc(inode, &iloc);
	if (error)
		return error;
	raw_inode = ext4_raw_inode(&iloc);
	header = IHDR(inode, raw_inode);
	end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
	error = xattr_check_inode(inode, header, end);
	if (error)
		goto cleanup;
	error = ext4_xattr_list_entries(dentry, IFIRST(header),
					buffer, buffer_size);

cleanup:
	brelse(iloc.bh);
	return error;
}

/*
 * Inode operation listxattr()
 *
 * d_inode(dentry)->i_rwsem: don't care
 *
 * Copy a list of attribute names into the buffer
 * provided, or compute the buffer size required.
 * Buffer is NULL to compute the size of the buffer required.
 *
 * Returns a negative error number on failure, or the number of bytes
 * used / required on success.
 */
ssize_t
ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
{
	int ret, ret2;

	down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	if (buffer) {
		buffer += ret;
		buffer_size -= ret;
	}
	ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
	if (ret < 0)
		goto errout;
	ret += ret2;
errout:
	up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
	return ret;
}

/*
 * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
 * not set, set it.
 */
static void ext4_xattr_update_super_block(handle_t *handle,
					  struct super_block *sb)
{
	if (ext4_has_feature_xattr(sb))
		return;

	BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
	if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
		ext4_set_feature_xattr(sb);
		ext4_handle_dirty_super(handle, sb);
	}
}

int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
{
	struct ext4_iloc iloc = { .bh = NULL };
	struct buffer_head *bh = NULL;
	struct ext4_inode *raw_inode;
	struct ext4_xattr_ibody_header *header;
	struct ext4_xattr_entry *entry;
	qsize_t ea_inode_refs = 0;
	void *end;
	int ret;

	lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);

	if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
		ret = ext4_get_inode_loc(inode, &iloc);
		if (ret)
			goto out;
		raw_inode = ext4_raw_inode(&iloc);
		header = IHDR(inode, raw_inode);
		end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
		ret = xattr_check_inode(inode, header, end);
		if (ret)
			goto out;

		for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}

	if (EXT4_I(inode)->i_file_acl) {
		bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
		if (!bh) {
			ret = -EIO;
			goto out;
		}

		if (ext4_xattr_check_block(inode, bh)) {
			ret = -EFSCORRUPTED;
			goto out;
		}

		for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
		     entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				ea_inode_refs++;
	}
	*usage = ea_inode_refs + 1;
	ret = 0;
out:
	brelse(iloc.bh);
	brelse(bh);
	return ret;
}

static inline size_t round_up_cluster(struct inode *inode, size_t length)
{
	struct super_block *sb = inode->i_sb;
	size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
				    inode->i_blkbits);
	size_t mask = ~(cluster_size - 1);

	return (length + cluster_size - 1) & mask;
}
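
/*
 * round_up_cluster() example: with 4 KiB blocks and a 16-block bigalloc
 * cluster (cluster_size = 64 KiB), a length of 70000 bytes rounds up to
 * 131072, i.e. two clusters; without bigalloc (s_cluster_bits == 0) it
 * simply rounds up to the block size.
 */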

static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
{
	int err;

	err = dquot_alloc_inode(inode);
	if (err)
		return err;
	err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
	if (err)
		dquot_free_inode(inode);
	return err;
}

static void ext4_xattr_inode_free_quota(struct inode *parent,
					struct inode *ea_inode,
					size_t len)
{
	if (ea_inode &&
	    ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
		return;
	dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
	dquot_free_inode(parent);
}

int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
			     struct buffer_head *block_bh, size_t value_len,
			     bool is_create)
{
	int credits;
	int blocks;

	/*
	 * 1) Owner inode update
	 * 2) Ref count update on old xattr block
	 * 3) new xattr block
	 * 4) block bitmap update for new xattr block
	 * 5) group descriptor for new xattr block
	 * 6) block bitmap update for old xattr block
	 * 7) group descriptor for old block
	 *
	 * 6 & 7 can happen if we have two racing threads T_a and T_b
	 * which are each trying to set an xattr on inodes I_a and I_b
	 * which were both initially sharing an xattr block.
	 */
	credits = 7;

	/* Quota updates. */
	credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);

	/*
	 * In case of inline data, we may push out the data to a block,
	 * so we need to reserve credits for this eventuality
	 */
	if (inode && ext4_has_inline_data(inode))
		credits += ext4_writepage_trans_blocks(inode) + 1;

	/* We are done if ea_inode feature is not enabled. */
	if (!ext4_has_feature_ea_inode(sb))
		return credits;

	/* New ea_inode, inode map, block bitmap, group descriptor. */
	credits += 4;

	/* Data blocks. */
	blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;

	/* Indirection block or one level of extent tree. */
	blocks += 1;

	/* Block bitmap and group descriptor updates for each block. */
	credits += blocks * 2;

	/* Blocks themselves. */
	credits += blocks;

	if (!is_create) {
		/* Dereference ea_inode holding old xattr value.
		 * Old ea_inode, inode map, block bitmap, group descriptor.
		 */
		credits += 4;

		/* Data blocks for old ea_inode. */
		blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;

		/* Indirection block or one level of extent tree for old
		 * ea_inode.
		 */
		blocks += 1;

		/* Block bitmap and group descriptor updates for each block. */
		credits += blocks * 2;
	}

	/* We may need to clone the existing xattr block in which case we need
	 * to increment ref counts for existing ea_inodes referenced by it.
	 */
	if (block_bh) {
		struct ext4_xattr_entry *entry = BFIRST(block_bh);

		for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
			if (entry->e_value_inum)
				/* Ref count update on ea_inode. */
				credits += 1;
	}
	return credits;
}
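
/*
 * __ext4_xattr_set_credits() worked example (hypothetical numbers):
 * creating an 8 KiB value as an EA inode on a 4 KiB-block filesystem with
 * no inline data and no old block to clone gives blocks = 2 + 1 = 3, so
 * the ea_inode part adds 4 + 3 * 2 + 3 = 13 credits on top of the base 7
 * plus the quota credits.
 */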

static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
				     int credits, struct buffer_head *bh,
				     bool dirty, bool block_csum)
{
	int error;

	if (!ext4_handle_valid(handle))
		return 0;

	if (handle->h_buffer_credits >= credits)
		return 0;

	error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
	if (!error)
		return 0;
	if (error < 0) {
		ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
		return error;
	}

	if (bh && dirty) {
		if (block_csum)
			ext4_xattr_block_csum_set(inode, bh);
		error = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (error) {
			ext4_warning(inode->i_sb, "Handle metadata (error %d)",
				     error);
			return error;
		}
	}

	error = ext4_journal_restart(handle, credits);
	if (error) {
		ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
		return error;
	}

	if (bh) {
		error = ext4_journal_get_write_access(handle, bh);
		if (error) {
			ext4_warning(inode->i_sb,
				     "Get write access failed (error %d)",
				     error);
			return error;
		}
	}
	return 0;
}

static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
				       int ref_change)
{
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
	struct ext4_iloc iloc;
	s64 ref_count;
	u32 hash;
	int ret;

	inode_lock(ea_inode);

	ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
	if (ret) {
		iloc.bh = NULL;
		goto out;
	}

	ref_count = ext4_xattr_inode_get_ref(ea_inode);
	ref_count += ref_change;
	ext4_xattr_inode_set_ref(ea_inode, ref_count);

	if (ref_change > 0) {
		WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 1) {
			WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			set_nlink(ea_inode, 1);
			ext4_orphan_del(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_create(ea_inode_cache,
						      GFP_NOFS, hash,
						      ea_inode->i_ino,
						      true /* reusable */);
			}
		}
	} else {
		WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
			  ea_inode->i_ino, ref_count);

		if (ref_count == 0) {
			WARN_ONCE(ea_inode->i_nlink != 1,
				  "EA inode %lu i_nlink=%u",
				  ea_inode->i_ino, ea_inode->i_nlink);

			clear_nlink(ea_inode);
			ext4_orphan_add(handle, ea_inode);

			if (ea_inode_cache) {
				hash = ext4_xattr_inode_get_hash(ea_inode);
				mb_cache_entry_delete(ea_inode_cache, hash,
						      ea_inode->i_ino);
			}
		}
	}

	ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
	iloc.bh = NULL;
	if (ret)
		ext4_warning_inode(ea_inode,
				   "ext4_mark_iloc_dirty() failed ret=%d", ret);
out:
	brelse(iloc.bh);
	inode_unlock(ea_inode);
	return ret;
}

static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
}

static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
{
	return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
}

static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
					struct ext4_xattr_entry *first)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	struct ext4_xattr_entry *failed_entry;
	unsigned int ea_ino;
	int err, saved_err;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			goto cleanup;
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "inc ref error %d", err);
			iput(ea_inode);
			goto cleanup;
		}
		iput(ea_inode);
	}
	return 0;

cleanup:
	saved_err = err;
	failed_entry = entry;

	for (entry = first; entry != failed_entry;
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err) {
			ext4_warning(parent->i_sb,
				     "cleanup ea_ino %u iget error %d", ea_ino,
				     err);
			continue;
		}
		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err)
			ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
					   err);
		iput(ea_inode);
	}
	return saved_err;
}

static void
ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
			     struct buffer_head *bh,
			     struct ext4_xattr_entry *first, bool block_csum,
			     struct ext4_xattr_inode_array **ea_inode_array,
			     int extra_credits, bool skip_quota)
{
	struct inode *ea_inode;
	struct ext4_xattr_entry *entry;
	bool dirty = false;
	unsigned int ea_ino;
	int err;
	int credits;

	/* One credit for dec ref on ea_inode, one for orphan list addition, */
	credits = 2 + extra_credits;

	for (entry = first; !IS_LAST_ENTRY(entry);
	     entry = EXT4_XATTR_NEXT(entry)) {
		if (!entry->e_value_inum)
			continue;
		ea_ino = le32_to_cpu(entry->e_value_inum);
		err = ext4_xattr_inode_iget(parent, ea_ino,
					    le32_to_cpu(entry->e_hash),
					    &ea_inode);
		if (err)
			continue;

		err = ext4_expand_inode_array(ea_inode_array, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode,
					   "Expand inode array err=%d", err);
			iput(ea_inode);
			continue;
		}

		err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
						dirty, block_csum);
		if (err) {
			ext4_warning_inode(ea_inode, "Ensure credits err=%d",
					   err);
			continue;
		}

		err = ext4_xattr_inode_dec_ref(handle, ea_inode);
		if (err) {
			ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
					   err);
			continue;
		}

		if (!skip_quota)
			ext4_xattr_inode_free_quota(parent, ea_inode,
					      le32_to_cpu(entry->e_value_size));

		/*
		 * Forget about ea_inode within the same transaction that
		 * decrements the ref count. This avoids duplicate decrements in
		 * case the rest of the work spills over to subsequent
		 * transactions.
		 */
		entry->e_value_inum = 0;
		entry->e_value_size = 0;

		dirty = true;
	}

	if (dirty) {
		/*
		 * Note that we are deliberately skipping csum calculation for
		 * the final update because we do not expect any journal
		 * restarts until xattr block is freed.
		 */

		err = ext4_handle_dirty_metadata(handle, NULL, bh);
		if (err)
			ext4_warning_inode(parent,
					   "handle dirty metadata err=%d", err);
	}
}

/*
 * Release the xattr block BH: If the reference count is > 1, decrement it;
 * otherwise free the block.
 */
static void
ext4_xattr_release_block(handle_t *handle, struct inode *inode,
			 struct buffer_head *bh,
			 struct ext4_xattr_inode_array **ea_inode_array,
			 int extra_credits)
{
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	u32 hash, ref;
	int error = 0;

	BUFFER_TRACE(bh, "get_write_access");
	error = ext4_journal_get_write_access(handle, bh);
	if (error)
		goto out;

	lock_buffer(bh);
	hash = le32_to_cpu(BHDR(bh)->h_hash);
	ref = le32_to_cpu(BHDR(bh)->h_refcount);
	if (ref == 1) {
		ea_bdebug(bh, "refcount now=0; freeing");
		/*
		 * This must happen under buffer lock for
		 * ext4_xattr_block_set() to reliably detect freed block
		 */
		if (ea_block_cache)
			mb_cache_entry_delete(ea_block_cache, hash,
					      bh->b_blocknr);
		get_bh(bh);
		unlock_buffer(bh);

		if (ext4_has_feature_ea_inode(inode->i_sb))
			ext4_xattr_inode_dec_ref_all(handle, inode, bh,
						     BFIRST(bh),
						     true /* block_csum */,
						     ea_inode_array,
						     extra_credits,
						     true /* skip_quota */);
		ext4_free_blocks(handle, inode, bh, 0, 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	} else {
		ref--;
		BHDR(bh)->h_refcount = cpu_to_le32(ref);
		if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
			struct mb_cache_entry *ce;

			if (ea_block_cache) {
				ce = mb_cache_entry_get(ea_block_cache, hash,
							bh->b_blocknr);
				if (ce) {
					ce->e_reusable = 1;
					mb_cache_entry_put(ea_block_cache, ce);
				}
			}
		}

		ext4_xattr_block_csum_set(inode, bh);
		/*
		 * Beware of this ugliness: Releasing of xattr block references
		 * from different inodes can race and so we have to protect
		 * from a race where someone else frees the block (and releases
		 * its journal_head) before we are done dirtying the buffer. In
		 * nojournal mode this race is harmless and we actually cannot
		 * call ext4_handle_dirty_metadata() with locked buffer as
		 * that function can call sync_dirty_buffer() so for that case
		 * we handle the dirtying after unlocking the buffer.
		 */
		if (ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		unlock_buffer(bh);
		if (!ext4_handle_valid(handle))
			error = ext4_handle_dirty_metadata(handle, inode, bh);
		if (IS_SYNC(inode))
			ext4_handle_sync(handle);
		dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
		ea_bdebug(bh, "refcount now=%d; releasing",
			  le32_to_cpu(BHDR(bh)->h_refcount));
	}
out:
	ext4_std_error(inode->i_sb, error);
	return;
}

/*
 * Find the available free space for EAs. This also returns the total number of
 * bytes used by EA entries.
 */
static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
				    size_t *min_offs, void *base, int *total)
{
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < *min_offs)
				*min_offs = offs;
		}
		if (total)
			*total += EXT4_XATTR_LEN(last->e_name_len);
	}
	return (*min_offs - ((void *)last - base) - sizeof(__u32));
}

/*
 * Write the value of the EA in an inode.
 */
static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
				  const void *buf, int bufsize)
{
	struct buffer_head *bh = NULL;
	unsigned long block = 0;
	int blocksize = ea_inode->i_sb->s_blocksize;
	int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
	int csize, wsize = 0;
	int ret = 0;
	int retries = 0;

retry:
	while (ret >= 0 && ret < max_blocks) {
		struct ext4_map_blocks map;
		map.m_lblk = block += ret;
		map.m_len = max_blocks -= ret;

		ret = ext4_map_blocks(handle, ea_inode, &map,
				      EXT4_GET_BLOCKS_CREATE);
		if (ret <= 0) {
			ext4_mark_inode_dirty(handle, ea_inode);
			if (ret == -ENOSPC &&
			    ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
				ret = 0;
				goto retry;
			}
			break;
		}
	}

	if (ret < 0)
		return ret;

	block = 0;
	while (wsize < bufsize) {
		if (bh != NULL)
			brelse(bh);
		csize = (bufsize - wsize) > blocksize ? blocksize :
							bufsize - wsize;
		bh = ext4_getblk(handle, ea_inode, block, 0);
		if (IS_ERR(bh))
			return PTR_ERR(bh);
		ret = ext4_journal_get_write_access(handle, bh);
		if (ret)
			goto out;

		memcpy(bh->b_data, buf, csize);
		set_buffer_uptodate(bh);
		ext4_handle_dirty_metadata(handle, ea_inode, bh);

		buf += csize;
		wsize += csize;
		block += 1;
	}

	inode_lock(ea_inode);
	i_size_write(ea_inode, wsize);
	ext4_update_i_disksize(ea_inode, wsize);
	inode_unlock(ea_inode);

	ext4_mark_inode_dirty(handle, ea_inode);

out:
	brelse(bh);

	return ret;
}

/*
 * Create an inode to store the value of a large EA.
 */
static struct inode *ext4_xattr_inode_create(handle_t *handle,
					     struct inode *inode, u32 hash)
{
	struct inode *ea_inode = NULL;
	uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
	int err;

	/*
	 * Let the next inode be the goal, so we try and allocate the EA inode
	 * in the same group, or nearby one.
	 */
	ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				  S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
				  EXT4_EA_INODE_FL);
	if (!IS_ERR(ea_inode)) {
		ea_inode->i_op = &ext4_file_inode_operations;
		ea_inode->i_fop = &ext4_file_operations;
		ext4_set_aops(ea_inode);
		ext4_xattr_inode_set_class(ea_inode);
		unlock_new_inode(ea_inode);
		ext4_xattr_inode_set_ref(ea_inode, 1);
		ext4_xattr_inode_set_hash(ea_inode, hash);
		err = ext4_mark_inode_dirty(handle, ea_inode);
		if (!err)
			err = ext4_inode_attach_jinode(ea_inode);
		if (err) {
			iput(ea_inode);
			return ERR_PTR(err);
		}

		/*
		 * Xattr inodes are shared therefore quota charging is performed
		 * at a higher level.
		 */
		dquot_free_inode(ea_inode);
		dquot_drop(ea_inode);
		inode_lock(ea_inode);
		ea_inode->i_flags |= S_NOQUOTA;
		inode_unlock(ea_inode);
	}

	return ea_inode;
}

static struct inode *
ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
			    size_t value_len, u32 hash)
{
	struct inode *ea_inode;
	struct mb_cache_entry *ce;
	struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
	void *ea_data;

	if (!ea_inode_cache)
		return NULL;

	ce = mb_cache_entry_find_first(ea_inode_cache, hash);
	if (!ce)
		return NULL;

	ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
	if (!ea_data) {
		mb_cache_entry_put(ea_inode_cache, ce);
		return NULL;
	}

	while (ce) {
		ea_inode = ext4_iget(inode->i_sb, ce->e_value);
		if (!IS_ERR(ea_inode) &&
		    !is_bad_inode(ea_inode) &&
		    (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
		    i_size_read(ea_inode) == value_len &&
		    !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
		    !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
						    value_len) &&
		    !memcmp(value, ea_data, value_len)) {
			mb_cache_entry_touch(ea_inode_cache, ce);
			mb_cache_entry_put(ea_inode_cache, ce);
			kvfree(ea_data);
			return ea_inode;
		}

		if (!IS_ERR(ea_inode))
			iput(ea_inode);
		ce = mb_cache_entry_find_next(ea_inode_cache, ce);
	}
	kvfree(ea_data);
	return NULL;
}

/*
 * Add value of the EA in an inode.
 */
static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
					  const void *value, size_t value_len,
					  struct inode **ret_inode)
{
	struct inode *ea_inode;
	u32 hash;
	int err;

	hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
	ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
	if (ea_inode) {
		err = ext4_xattr_inode_inc_ref(handle, ea_inode);
		if (err) {
			iput(ea_inode);
			return err;
		}

		*ret_inode = ea_inode;
		return 0;
	}

	/* Create an inode for the EA value */
	ea_inode = ext4_xattr_inode_create(handle, inode, hash);
	if (IS_ERR(ea_inode))
		return PTR_ERR(ea_inode);

	err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
	if (err) {
		ext4_xattr_inode_dec_ref(handle, ea_inode);
		iput(ea_inode);
		return err;
	}

	if (EA_INODE_CACHE(inode))
		mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
				      ea_inode->i_ino, true /* reusable */);

	*ret_inode = ea_inode;
	return 0;
}

/*
 * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode
 * feature is enabled.
 */
#define EXT4_XATTR_BLOCK_RESERVE(inode)	min(i_blocksize(inode)/8, 1024U)
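/* e.g. a 4 KiB block reserves min(4096 / 8, 1024) = 512 bytes; blocks of
 * 8 KiB or larger are capped at 1024 bytes.
 */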

static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
				struct ext4_xattr_search *s,
				handle_t *handle, struct inode *inode,
				bool is_block)
{
	struct ext4_xattr_entry *last;
	struct ext4_xattr_entry *here = s->here;
	size_t min_offs = s->end - s->base, name_len = strlen(i->name);
	int in_inode = i->in_inode;
	struct inode *old_ea_inode = NULL;
	struct inode *new_ea_inode = NULL;
	size_t old_size, new_size;
	int ret;

	/* Space used by old and new values. */
	old_size = (!s->not_found && !here->e_value_inum) ?
			EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
	new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;

	/*
	 * Optimization for the simple case when old and new values have the
	 * same padded sizes. Not applicable if external inodes are involved.
	 */
	if (new_size && new_size == old_size) {
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		here->e_value_size = cpu_to_le32(i->value_len);
		if (i->value == EXT4_ZERO_XATTR_VALUE) {
			memset(val, 0, new_size);
		} else {
			memcpy(val, i->value, i->value_len);
			/* Clear padding bytes. */
			memset(val + i->value_len, 0, new_size - i->value_len);
		}
		goto update_hash;
	}

	/* Compute min_offs and last. */
	last = s->first;
	for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
		if (!last->e_value_inum && last->e_value_size) {
			size_t offs = le16_to_cpu(last->e_value_offs);
			if (offs < min_offs)
				min_offs = offs;
		}
	}

	/* Check whether we have enough space. */
	if (i->value) {
		size_t free;

		free = min_offs - ((void *)last - s->base) - sizeof(__u32);
		if (!s->not_found)
			free += EXT4_XATTR_LEN(name_len) + old_size;

		if (free < EXT4_XATTR_LEN(name_len) + new_size) {
			ret = -ENOSPC;
			goto out;
		}

		/*
		 * If storing the value in an external inode is an option,
		 * reserve space for xattr entries/names in the external
		 * attribute block so that a long value does not occupy the
		 * whole space and prevent further entries being added.
		 */
		if (ext4_has_feature_ea_inode(inode->i_sb) &&
		    new_size && is_block &&
		    (min_offs + old_size - new_size) <
					EXT4_XATTR_BLOCK_RESERVE(inode)) {
			ret = -ENOSPC;
			goto out;
		}
	}

	/*
	 * Getting access to old and new ea inodes is subject to failures.
	 * Finish that work before doing any modifications to the xattr data.
	 */
	if (!s->not_found && here->e_value_inum) {
		ret = ext4_xattr_inode_iget(inode,
					    le32_to_cpu(here->e_value_inum),
					    le32_to_cpu(here->e_hash),
					    &old_ea_inode);
		if (ret) {
			old_ea_inode = NULL;
			goto out;
		}
	}
	if (i->value && in_inode) {
		WARN_ON_ONCE(!i->value_len);

		ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
		if (ret)
			goto out;

		ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
						     i->value_len,
						     &new_ea_inode);
		if (ret) {
			new_ea_inode = NULL;
			ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
			goto out;
		}
	}

	if (old_ea_inode) {
		/* We are ready to release ref count on the old_ea_inode. */
		ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
		if (ret) {
			/* Release newly required ref count on new_ea_inode. */
			if (new_ea_inode) {
				int err;

				err = ext4_xattr_inode_dec_ref(handle,
							       new_ea_inode);
				if (err)
					ext4_warning_inode(new_ea_inode,
						  "dec ref new_ea_inode err=%d",
						  err);
				ext4_xattr_inode_free_quota(inode, new_ea_inode,
							    i->value_len);
			}
			goto out;
		}

		ext4_xattr_inode_free_quota(inode, old_ea_inode,
					    le32_to_cpu(here->e_value_size));
	}

	/* No failures allowed past this point. */

	if (!s->not_found && here->e_value_offs) {
		/* Remove the old value. */
		void *first_val = s->base + min_offs;
		size_t offs = le16_to_cpu(here->e_value_offs);
		void *val = s->base + offs;

		memmove(first_val + old_size, first_val, val - first_val);
		memset(first_val, 0, old_size);
		min_offs += old_size;

		/* Adjust all value offsets. */
		last = s->first;
		while (!IS_LAST_ENTRY(last)) {
			size_t o = le16_to_cpu(last->e_value_offs);

			if (!last->e_value_inum &&
			    last->e_value_size && o < offs)
				last->e_value_offs = cpu_to_le16(o + old_size);
			last = EXT4_XATTR_NEXT(last);
		}
	}

	if (!i->value) {
		/* Remove old name. */
		size_t size = EXT4_XATTR_LEN(name_len);

		last = ENTRY((void *)last - size);
		memmove(here, (void *)here + size,
			(void *)last - (void *)here + sizeof(__u32));
		memset(last, 0, size);
	} else if (s->not_found) {
		/* Insert new name. */
		size_t size = EXT4_XATTR_LEN(name_len);
		size_t rest = (void *)last - (void *)here + sizeof(__u32);

		memmove((void *)here + size, here, rest);
		memset(here, 0, size);
		here->e_name_index = i->name_index;
		here->e_name_len = name_len;
		memcpy(here->e_name, i->name, name_len);
	} else {
		/* This is an update, reset value info. */
		here->e_value_inum = 0;
		here->e_value_offs = 0;
		here->e_value_size = 0;
	}

	if (i->value) {
		/* Insert new value. */
		if (in_inode) {
			here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
		} else if (i->value_len) {
			void *val = s->base + min_offs - new_size;

			here->e_value_offs = cpu_to_le16(min_offs - new_size);
			if (i->value == EXT4_ZERO_XATTR_VALUE) {
				memset(val, 0, new_size);
			} else {
				memcpy(val, i->value, i->value_len);
				/* Clear padding bytes. */
				memset(val + i->value_len, 0,
				       new_size - i->value_len);
			}
		}
		here->e_value_size = cpu_to_le32(i->value_len);
	}

update_hash:
	if (i->value) {
		__le32 hash = 0;

		/* Entry hash calculation. */
		if (in_inode) {
			__le32 crc32c_hash;

			/*
			 * Feed crc32c hash instead of the raw value for entry
			 * hash calculation. This is to avoid walking
			 * potentially long value buffer again.
			 */
			crc32c_hash = cpu_to_le32(
				ext4_xattr_inode_get_hash(new_ea_inode));
			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len,
						     &crc32c_hash, 1);
		} else if (is_block) {
			__le32 *value = s->base + le16_to_cpu(
							here->e_value_offs);

			hash = ext4_xattr_hash_entry(here->e_name,
						     here->e_name_len, value,
						     new_size >> 2);
		}
		here->e_hash = hash;
	}

	if (is_block)
		ext4_xattr_rehash((struct ext4_xattr_header *)s->base);

	ret = 0;
out:
	iput(old_ea_inode);
	iput(new_ea_inode);
	return ret;
}

struct ext4_xattr_block_find {
	struct ext4_xattr_search s;
	struct buffer_head *bh;
};

static int
ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
		      struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	int error;

	ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
		  i->name_index, i->name, i->value, (long)i->value_len);

	if (EXT4_I(inode)->i_file_acl) {
		/* The inode already has an extended attribute block. */
		bs->bh = sb_bread(sb, EXT4_I(inode)->i_file_acl);
		error = -EIO;
		if (!bs->bh)
			goto cleanup;
		ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
			atomic_read(&(bs->bh->b_count)),
			le32_to_cpu(BHDR(bs->bh)->h_refcount));
		if (ext4_xattr_check_block(inode, bs->bh)) {
			EXT4_ERROR_INODE(inode, "bad block %llu",
					 EXT4_I(inode)->i_file_acl);
			error = -EFSCORRUPTED;
			goto cleanup;
		}
		/* Find the named attribute. */
		bs->s.base = BHDR(bs->bh);
		bs->s.first = BFIRST(bs->bh);
		bs->s.end = bs->bh->b_data + bs->bh->b_size;
		bs->s.here = bs->s.first;
		error = ext4_xattr_find_entry(&bs->s.here, i->name_index,
					      i->name, 1);
		if (error && error != -ENODATA)
			goto cleanup;
		bs->s.not_found = error;
	}
	error = 0;

cleanup:
	return error;
}

static int
ext4_xattr_block_set(handle_t *handle, struct inode *inode,
		     struct ext4_xattr_info *i,
		     struct ext4_xattr_block_find *bs)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *new_bh = NULL;
	struct ext4_xattr_search s_copy = bs->s;
	struct ext4_xattr_search *s = &s_copy;
	struct mb_cache_entry *ce = NULL;
	int error = 0;
	struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
	struct inode *ea_inode = NULL, *tmp_inode;
	size_t old_ea_inode_quota = 0;
	unsigned int ea_ino;


#define header(x) ((struct ext4_xattr_header *)(x))

	if (s->base) {
		BUFFER_TRACE(bs->bh, "get_write_access");
		error = ext4_journal_get_write_access(handle, bs->bh);
		if (error)
			goto cleanup;
		lock_buffer(bs->bh);

		if (header(s->base)->h_refcount == cpu_to_le32(1)) {
			__u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);

			/*
			 * This must happen under buffer lock for
			 * ext4_xattr_block_set() to reliably detect modified
			 * block
			 */
			if (ea_block_cache)
				mb_cache_entry_delete(ea_block_cache, hash,
						      bs->bh->b_blocknr);
			ea_bdebug(bs->bh, "modifying in-place");
			error = ext4_xattr_set_entry(i, s, handle, inode,
						     true /* is_block */);
			ext4_xattr_block_csum_set(inode, bs->bh);
			unlock_buffer(bs->bh);
			if (error == -EFSCORRUPTED)
				goto bad_block;
			if (!error)
				error = ext4_handle_dirty_metadata(handle,
								   inode,
								   bs->bh);
			if (error)
				goto cleanup;
			goto inserted;
		} else {
			int offset = (char *)s->here - bs->bh->b_data;

			unlock_buffer(bs->bh);
			ea_bdebug(bs->bh, "cloning");
			s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
			error = -ENOMEM;
			if (s->base == NULL)
				goto cleanup;
			memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
			s->first = ENTRY(header(s->base)+1);
			header(s->base)->h_refcount = cpu_to_le32(1);
			s->here = ENTRY(s->base + offset);
			s->end = s->base + bs->bh->b_size;

			/*
			 * If existing entry points to an xattr inode, we need
			 * to prevent ext4_xattr_set_entry() from decrementing
			 * ref count on it because the reference belongs to the
			 * original block. In this case, make the entry look
			 * like it has an empty value.
			 */
			if (!s->not_found && s->here->e_value_inum) {
				ea_ino = le32_to_cpu(s->here->e_value_inum);
				error = ext4_xattr_inode_iget(inode, ea_ino,
					      le32_to_cpu(s->here->e_hash),
					      &tmp_inode);
				if (error)
					goto cleanup;

				if (!ext4_test_inode_state(tmp_inode,
						EXT4_STATE_LUSTRE_EA_INODE)) {
					/*
					 * Defer quota free call for previous
					 * inode until success is guaranteed.
					 */
					old_ea_inode_quota = le32_to_cpu(
							s->here->e_value_size);
				}
				iput(tmp_inode);

				s->here->e_value_inum = 0;
				s->here->e_value_size = 0;
			}
		}
	} else {
		/* Allocate a buffer where we construct the new block. */
		s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
		/* assert(header == s->base) */
		error = -ENOMEM;
		if (s->base == NULL)
			goto cleanup;
		header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
		header(s->base)->h_blocks = cpu_to_le32(1);
		header(s->base)->h_refcount = cpu_to_le32(1);
		s->first = ENTRY(header(s->base)+1);
		s->here = ENTRY(header(s->base)+1);
		s->end = s->base + sb->s_blocksize;
	}

	error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
	if (error == -EFSCORRUPTED)
		goto bad_block;
	if (error)
		goto cleanup;

	if (i->value && s->here->e_value_inum) {
		/*
		 * A ref count on ea_inode has been taken as part of the call to
		 * ext4_xattr_set_entry() above. We would like to drop this
		 * extra ref but we have to wait until the xattr block is
		 * initialized and has its own ref count on the ea_inode.
		 */
		ea_ino = le32_to_cpu(s->here->e_value_inum);
		error = ext4_xattr_inode_iget(inode, ea_ino,
					      le32_to_cpu(s->here->e_hash),
					      &ea_inode);
		if (error) {
			ea_inode = NULL;
			goto cleanup;
		}
	}

inserted:
	if (!IS_LAST_ENTRY(s->first)) {
		new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
						     &ce);
		if (new_bh) {
			/* We found an identical block in the cache. */
			if (new_bh == bs->bh)
				ea_bdebug(new_bh, "keeping");
			else {
				u32 ref;

				WARN_ON_ONCE(dquot_initialize_needed(inode));

				/* The old block is released after updating
				   the inode. */
				error = dquot_alloc_block(inode,
						EXT4_C2B(EXT4_SB(sb), 1));
				if (error)
					goto cleanup;
				BUFFER_TRACE(new_bh, "get_write_access");
				error = ext4_journal_get_write_access(handle,
								      new_bh);
				if (error)
					goto cleanup_dquot;
				lock_buffer(new_bh);
				/*
				 * We have to be careful about races with
				 * freeing, rehashing or adding references to
				 * xattr block. Once we hold buffer lock xattr
				 * block's state is stable so we can check
				 * whether the block got freed / rehashed or
				 * not. Since we unhash mbcache entry under
				 * buffer lock when freeing / rehashing xattr
				 * block, checking whether entry is still
				 * hashed is reliable. Same rules hold for
				 * e_reusable handling.
				 */
				if (hlist_bl_unhashed(&ce->e_hash_list) ||
				    !ce->e_reusable) {
					/*
					 * Undo everything and check mbcache
					 * again.
					 */
					unlock_buffer(new_bh);
					dquot_free_block(inode,
							 EXT4_C2B(EXT4_SB(sb),
								  1));
					brelse(new_bh);
					mb_cache_entry_put(ea_block_cache, ce);
					ce = NULL;
					new_bh = NULL;
					goto inserted;
				}
				ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
				BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
				if (ref >= EXT4_XATTR_REFCOUNT_MAX)
					ce->e_reusable = 0;
				ea_bdebug(new_bh, "reusing; refcount now=%d",
					  ref);
				ext4_xattr_block_csum_set(inode, new_bh);
				unlock_buffer(new_bh);
				error = ext4_handle_dirty_metadata(handle,
								   inode,
								   new_bh);
				if (error)
					goto cleanup_dquot;
			}
			mb_cache_entry_touch(ea_block_cache, ce);
			mb_cache_entry_put(ea_block_cache, ce);
			ce = NULL;
		} else if (bs->bh && s->base == bs->bh->b_data) {
			/* We were modifying this block in-place. */
			ea_bdebug(bs->bh, "keeping this block");
			ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
			new_bh = bs->bh;
			get_bh(new_bh);
		} else {
			/* We need to allocate a new block */
			ext4_fsblk_t goal, block;

			WARN_ON_ONCE(dquot_initialize_needed(inode));

			goal = ext4_group_first_block_no(sb,
						EXT4_I(inode)->i_block_group);

			/* non-extent files can't have physical blocks past 2^32 */
			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;

			block = ext4_new_meta_blocks(handle, inode, goal, 0,
						     NULL, &error);
			if (error)
				goto cleanup;

			if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
				BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);

			ea_idebug(inode, "creating block %llu",
				  (unsigned long long)block);

			new_bh = sb_getblk(sb, block);
			if (unlikely(!new_bh)) {
				error = -ENOMEM;
getblk_failed:
				ext4_free_blocks(handle, inode, NULL, block, 1,
						 EXT4_FREE_BLOCKS_METADATA);
				goto cleanup;
			}
			error = ext4_xattr_inode_inc_ref_all(handle, inode,
						      ENTRY(header(s->base)+1));
			if (error)
				goto getblk_failed;
			if (ea_inode) {
				/* Drop the extra ref on ea_inode. */
				error = ext4_xattr_inode_dec_ref(handle,
								 ea_inode);
				if (error)
					ext4_warning_inode(ea_inode,
							   "dec ref error=%d",
							   error);
				iput(ea_inode);
				ea_inode = NULL;
			}

			lock_buffer(new_bh);
			error = ext4_journal_get_create_access(handle, new_bh);
			if (error) {
				unlock_buffer(new_bh);
				error = -EIO;
				goto getblk_failed;
			}
			memcpy(new_bh->b_data, s->base, new_bh->b_size);
			ext4_xattr_block_csum_set(inode, new_bh);
			set_buffer_uptodate(new_bh);
			unlock_buffer(new_bh);
2086 ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
2087 error = ext4_handle_dirty_metadata(handle, inode,
2088 new_bh);
2089 if (error)
2090 goto cleanup;
2091 }
2092 }
2093
2094 if (old_ea_inode_quota)
2095 ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota);
2096
2097 /* Update the inode. */
2098 EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
2099
2100 /* Drop the previous xattr block. */
2101 if (bs->bh && bs->bh != new_bh) {
2102 struct ext4_xattr_inode_array *ea_inode_array = NULL;
2103
2104 ext4_xattr_release_block(handle, inode, bs->bh,
2105 &ea_inode_array,
2106 0 /* extra_credits */);
2107 ext4_xattr_inode_array_free(ea_inode_array);
2108 }
2109 error = 0;
2110
2111 cleanup:
2112 if (ea_inode) {
2113 int error2;
2114
2115 error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
2116 if (error2)
2117 ext4_warning_inode(ea_inode, "dec ref error=%d",
2118 error2);
2119
2120 /* If there was an error, revert the quota charge. */
2121 if (error)
2122 ext4_xattr_inode_free_quota(inode, ea_inode,
2123 i_size_read(ea_inode));
2124 iput(ea_inode);
2125 }
2126 if (ce)
2127 mb_cache_entry_put(ea_block_cache, ce);
2128 brelse(new_bh);
2129 if (!(bs->bh && s->base == bs->bh->b_data))
2130 kfree(s->base);
2131
2132 return error;
2133
2134 cleanup_dquot:
2135 dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
2136 goto cleanup;
2137
2138 bad_block:
2139 EXT4_ERROR_INODE(inode, "bad block %llu",
2140 EXT4_I(inode)->i_file_acl);
2141 goto cleanup;
2142
2143 #undef header
2144 }
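/*
 * Illustrative note: because the mbcache lookup above deduplicates
 * identical blocks, giving several inodes the same attribute set leaves
 * them sharing a single EA block whose h_refcount tracks the number of
 * sharers, up to EXT4_XATTR_REFCOUNT_MAX; past that point e_reusable is
 * cleared and a fresh block is allocated for further sharers.
 */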
2145
2146 int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
2147 struct ext4_xattr_ibody_find *is)
2148 {
2149 struct ext4_xattr_ibody_header *header;
2150 struct ext4_inode *raw_inode;
2151 int error;
2152
2153 if (EXT4_I(inode)->i_extra_isize == 0)
2154 return 0;
2155 raw_inode = ext4_raw_inode(&is->iloc);
2156 header = IHDR(inode, raw_inode);
2157 is->s.base = is->s.first = IFIRST(header);
2158 is->s.here = is->s.first;
2159 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2160 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2161 error = xattr_check_inode(inode, header, is->s.end);
2162 if (error)
2163 return error;
2164 /* Find the named attribute. */
2165 error = ext4_xattr_find_entry(&is->s.here, i->name_index,
2166 i->name, 0);
2167 if (error && error != -ENODATA)
2168 return error;
2169 is->s.not_found = error;
2170 }
2171 return 0;
2172 }
2173
2174 int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
2175 struct ext4_xattr_info *i,
2176 struct ext4_xattr_ibody_find *is)
2177 {
2178 struct ext4_xattr_ibody_header *header;
2179 struct ext4_xattr_search *s = &is->s;
2180 int error;
2181
2182 if (EXT4_I(inode)->i_extra_isize == 0)
2183 return -ENOSPC;
2184 error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2185 if (error) {
2186 if (error == -ENOSPC &&
2187 ext4_has_inline_data(inode)) {
2188 error = ext4_try_to_evict_inline_data(handle, inode,
2189 EXT4_XATTR_LEN(strlen(i->name) +
2190 EXT4_XATTR_SIZE(i->value_len)));
2191 if (error)
2192 return error;
2193 error = ext4_xattr_ibody_find(inode, i, is);
2194 if (error)
2195 return error;
2196 error = ext4_xattr_set_entry(i, s, handle, inode,
2197 false /* is_block */);
2198 }
2199 if (error)
2200 return error;
2201 }
2202 header = IHDR(inode, ext4_raw_inode(&is->iloc));
2203 if (!IS_LAST_ENTRY(s->first)) {
2204 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2205 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2206 } else {
2207 header->h_magic = cpu_to_le32(0);
2208 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2209 }
2210 return 0;
2211 }
2212
2213 static int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
2214 struct ext4_xattr_info *i,
2215 struct ext4_xattr_ibody_find *is)
2216 {
2217 struct ext4_xattr_ibody_header *header;
2218 struct ext4_xattr_search *s = &is->s;
2219 int error;
2220
2221 if (EXT4_I(inode)->i_extra_isize == 0)
2222 return -ENOSPC;
2223 error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2224 if (error)
2225 return error;
2226 header = IHDR(inode, ext4_raw_inode(&is->iloc));
2227 if (!IS_LAST_ENTRY(s->first)) {
2228 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2229 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2230 } else {
2231 header->h_magic = cpu_to_le32(0);
2232 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2233 }
2234 return 0;
2235 }
2236
2237 static int ext4_xattr_value_same(struct ext4_xattr_search *s,
2238 struct ext4_xattr_info *i)
2239 {
2240 void *value;
2241
2242 /* When e_value_inum is set the value is stored externally. */
2243 if (s->here->e_value_inum)
2244 return 0;
2245 if (le32_to_cpu(s->here->e_value_size) != i->value_len)
2246 return 0;
2247 value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
2248 return !memcmp(value, i->value, i->value_len);
2249 }
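/*
 * Note: this comparison lets ext4_xattr_set_handle() below skip the
 * update entirely when the new value is byte-identical to the old one.
 * Values held in an EA inode (e_value_inum != 0) always compare as
 * different and are therefore always rewritten.
 */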
2250
2251 static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
2252 {
2253 struct buffer_head *bh;
2254 int error;
2255
2256 if (!EXT4_I(inode)->i_file_acl)
2257 return NULL;
2258 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
2259 if (!bh)
2260 return ERR_PTR(-EIO);
2261 error = ext4_xattr_check_block(inode, bh);
2262 if (error)
2263 return ERR_PTR(error);
2264 return bh;
2265 }
2266
2267 /*
2268 * ext4_xattr_set_handle()
2269 *
2270 * Create, replace or remove an extended attribute for this inode. Value
2271 * is NULL to remove an existing extended attribute, and non-NULL to
2272 * either replace an existing extended attribute, or create a new extended
2273 * attribute. The flags XATTR_REPLACE and XATTR_CREATE specify that an
2274 * extended attribute must already exist, or must not yet exist, prior
2275 * to the call, respectively.
2276 *
2277 * Returns 0, or a negative error number on failure.
2278 */
2279 int
2280 ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
2281 const char *name, const void *value, size_t value_len,
2282 int flags)
2283 {
2284 struct ext4_xattr_info i = {
2285 .name_index = name_index,
2286 .name = name,
2287 .value = value,
2288 .value_len = value_len,
2289 .in_inode = 0,
2290 };
2291 struct ext4_xattr_ibody_find is = {
2292 .s = { .not_found = -ENODATA, },
2293 };
2294 struct ext4_xattr_block_find bs = {
2295 .s = { .not_found = -ENODATA, },
2296 };
2297 int no_expand;
2298 int error;
2299
2300 if (!name)
2301 return -EINVAL;
2302 if (strlen(name) > 255)
2303 return -ERANGE;
2304
2305 ext4_write_lock_xattr(inode, &no_expand);
2306
2307 /* Check journal credits under write lock. */
2308 if (ext4_handle_valid(handle)) {
2309 struct buffer_head *bh;
2310 int credits;
2311
2312 bh = ext4_xattr_get_block(inode);
2313 if (IS_ERR(bh)) {
2314 error = PTR_ERR(bh);
2315 goto cleanup;
2316 }
2317
2318 credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2319 value_len,
2320 flags & XATTR_CREATE);
2321 brelse(bh);
2322
2323 if (!ext4_handle_has_enough_credits(handle, credits)) {
2324 error = -ENOSPC;
2325 goto cleanup;
2326 }
2327 }
2328
2329 error = ext4_reserve_inode_write(handle, inode, &is.iloc);
2330 if (error)
2331 goto cleanup;
2332
2333 if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
2334 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
2335 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2336 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
2337 }
2338
2339 error = ext4_xattr_ibody_find(inode, &i, &is);
2340 if (error)
2341 goto cleanup;
2342 if (is.s.not_found)
2343 error = ext4_xattr_block_find(inode, &i, &bs);
2344 if (error)
2345 goto cleanup;
2346 if (is.s.not_found && bs.s.not_found) {
2347 error = -ENODATA;
2348 if (flags & XATTR_REPLACE)
2349 goto cleanup;
2350 error = 0;
2351 if (!value)
2352 goto cleanup;
2353 } else {
2354 error = -EEXIST;
2355 if (flags & XATTR_CREATE)
2356 goto cleanup;
2357 }
2358
2359 if (!value) {
2360 if (!is.s.not_found)
2361 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2362 else if (!bs.s.not_found)
2363 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2364 } else {
2365 error = 0;
2366 /* Xattr value did not change? Save us some work and bail out */
2367 if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
2368 goto cleanup;
2369 if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
2370 goto cleanup;
2371
2372 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2373 (EXT4_XATTR_SIZE(i.value_len) >
2374 EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
2375 i.in_inode = 1;
2376 retry_inode:
2377 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2378 if (!error && !bs.s.not_found) {
2379 i.value = NULL;
2380 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2381 } else if (error == -ENOSPC) {
2382 if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
2383 error = ext4_xattr_block_find(inode, &i, &bs);
2384 if (error)
2385 goto cleanup;
2386 }
2387 error = ext4_xattr_block_set(handle, inode, &i, &bs);
2388 if (!error && !is.s.not_found) {
2389 i.value = NULL;
2390 error = ext4_xattr_ibody_set(handle, inode, &i,
2391 &is);
2392 } else if (error == -ENOSPC) {
2393 /*
2394 * The xattr does not fit in the block; store it in an
2395 * external EA inode if possible.
2396 */
2397 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2398 !i.in_inode) {
2399 i.in_inode = 1;
2400 goto retry_inode;
2401 }
2402 }
2403 }
2404 }
2405 if (!error) {
2406 ext4_xattr_update_super_block(handle, inode->i_sb);
2407 inode->i_ctime = current_time(inode);
2408 if (!value)
2409 no_expand = 0;
2410 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
2411 /*
2412 * The bh is consumed by ext4_mark_iloc_dirty, even with
2413 * error != 0.
2414 */
2415 is.iloc.bh = NULL;
2416 if (IS_SYNC(inode))
2417 ext4_handle_sync(handle);
2418 }
2419
2420 cleanup:
2421 brelse(is.iloc.bh);
2422 brelse(bs.bh);
2423 ext4_write_unlock_xattr(inode, &no_expand);
2424 return error;
2425 }
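/*
 * Userspace view (an illustrative sketch, not a definitive reference;
 * assumes "./f" exists on an ext4 filesystem): the XATTR_CREATE /
 * XATTR_REPLACE semantics and the value == NULL removal path above
 * surface through setxattr(2) and removexattr(2):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/xattr.h>
 *
 *	int main(void)
 *	{
 *		const char *v = "one";
 *
 *		// EEXIST if user.demo already exists (XATTR_CREATE)
 *		if (setxattr("./f", "user.demo", v, strlen(v), XATTR_CREATE))
 *			perror("create");
 *		// ENODATA if user.demo does not exist yet (XATTR_REPLACE)
 *		if (setxattr("./f", "user.demo", "two", 3, XATTR_REPLACE))
 *			perror("replace");
 *		// removal corresponds to the value == NULL case above
 *		if (removexattr("./f", "user.demo"))
 *			perror("remove");
 *		return 0;
 *	}
 */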
2426
2427 int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
2428 bool is_create, int *credits)
2429 {
2430 struct buffer_head *bh;
2431 int err;
2432
2433 *credits = 0;
2434
2435 if (!EXT4_SB(inode->i_sb)->s_journal)
2436 return 0;
2437
2438 down_read(&EXT4_I(inode)->xattr_sem);
2439
2440 bh = ext4_xattr_get_block(inode);
2441 if (IS_ERR(bh)) {
2442 err = PTR_ERR(bh);
2443 } else {
2444 *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2445 value_len, is_create);
2446 brelse(bh);
2447 err = 0;
2448 }
2449
2450 up_read(&EXT4_I(inode)->xattr_sem);
2451 return err;
2452 }
2453
2454 /*
2455 * ext4_xattr_set()
2456 *
2457 * Like ext4_xattr_set_handle, but starts from an inode. This extended
2458 * attribute modification is a filesystem transaction by itself.
2459 *
2460 * Returns 0, or a negative error number on failure.
2461 */
2462 int
2463 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
2464 const void *value, size_t value_len, int flags)
2465 {
2466 handle_t *handle;
2467 struct super_block *sb = inode->i_sb;
2468 int error, retries = 0;
2469 int credits;
2470
2471 error = dquot_initialize(inode);
2472 if (error)
2473 return error;
2474
2475 retry:
2476 error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
2477 &credits);
2478 if (error)
2479 return error;
2480
2481 handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
2482 if (IS_ERR(handle)) {
2483 error = PTR_ERR(handle);
2484 } else {
2485 int error2;
2486
2487 error = ext4_xattr_set_handle(handle, inode, name_index, name,
2488 value, value_len, flags);
2489 error2 = ext4_journal_stop(handle);
2490 if (error == -ENOSPC &&
2491 ext4_should_retry_alloc(sb, &retries))
2492 goto retry;
2493 if (error == 0)
2494 error = error2;
2495 }
2496
2497 return error;
2498 }
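/*
 * Note: each call is a transaction of its own (see the comment above);
 * on -ENOSPC the whole transaction is retried for as long as
 * ext4_should_retry_alloc() keeps approving retries.
 */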
2499
2500 /*
2501 * Shift the EA entries in the inode to create space for the increased
2502 * i_extra_isize.
2503 */
2504 static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
2505 int value_offs_shift, void *to,
2506 void *from, size_t n)
2507 {
2508 struct ext4_xattr_entry *last = entry;
2509 int new_offs;
2510
2511 /* Entries are only ever shifted further into the inode, thus offsets get lower */
2512 BUG_ON(value_offs_shift > 0);
2513
2514 /* Adjust the value offsets of the entries */
2515 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2516 if (!last->e_value_inum && last->e_value_size) {
2517 new_offs = le16_to_cpu(last->e_value_offs) +
2518 value_offs_shift;
2519 last->e_value_offs = cpu_to_le16(new_offs);
2520 }
2521 }
2522 /* Shift the entries by n bytes */
2523 memmove(to, from, n);
2524 }
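/*
 * Worked example (illustrative): when i_extra_isize grows from 32 to 64
 * bytes, the caller passes value_offs_shift == -32. Every in-inode
 * e_value_offs then drops by 32, because the base of the entry area
 * moves 32 bytes toward the end of the inode while the values
 * themselves stay in place.
 */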
2525
2526 /*
2527 * Move xattr pointed to by 'entry' from inode into external xattr block
2528 */
2529 static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
2530 struct ext4_inode *raw_inode,
2531 struct ext4_xattr_entry *entry)
2532 {
2533 struct ext4_xattr_ibody_find *is = NULL;
2534 struct ext4_xattr_block_find *bs = NULL;
2535 char *buffer = NULL, *b_entry_name = NULL;
2536 size_t value_size = le32_to_cpu(entry->e_value_size);
2537 struct ext4_xattr_info i = {
2538 .value = NULL,
2539 .value_len = 0,
2540 .name_index = entry->e_name_index,
2541 .in_inode = !!entry->e_value_inum,
2542 };
2543 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2544 int error;
2545
2546 is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
2547 bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
2548 buffer = kmalloc(value_size, GFP_NOFS);
2549 b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
2550 if (!is || !bs || !buffer || !b_entry_name) {
2551 error = -ENOMEM;
2552 goto out;
2553 }
2554
2555 is->s.not_found = -ENODATA;
2556 bs->s.not_found = -ENODATA;
2557 is->iloc.bh = NULL;
2558 bs->bh = NULL;
2559
2560 /* Save the entry name and the entry value */
2561 if (entry->e_value_inum) {
2562 error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
2563 if (error)
2564 goto out;
2565 } else {
2566 size_t value_offs = le16_to_cpu(entry->e_value_offs);
2567 memcpy(buffer, (void *)IFIRST(header) + value_offs, value_size);
2568 }
2569
2570 memcpy(b_entry_name, entry->e_name, entry->e_name_len);
2571 b_entry_name[entry->e_name_len] = '\0';
2572 i.name = b_entry_name;
2573
2574 error = ext4_get_inode_loc(inode, &is->iloc);
2575 if (error)
2576 goto out;
2577
2578 error = ext4_xattr_ibody_find(inode, &i, is);
2579 if (error)
2580 goto out;
2581
2582 /* Remove the chosen entry from the inode */
2583 error = ext4_xattr_ibody_set(handle, inode, &i, is);
2584 if (error)
2585 goto out;
2586
2587 i.value = buffer;
2588 i.value_len = value_size;
2589 error = ext4_xattr_block_find(inode, &i, bs);
2590 if (error)
2591 goto out;
2592
2593 /* Add the entry that was just removed from the inode into the block */
2594 error = ext4_xattr_block_set(handle, inode, &i, bs);
2595 if (error)
2596 goto out;
2597 error = 0;
2598 out:
2599 kfree(b_entry_name);
2600 kfree(buffer);
2601 if (is)
2602 brelse(is->iloc.bh);
2603 kfree(is);
2604 kfree(bs);
2605
2606 return error;
2607 }
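/*
 * Note: the name and value are copied out up front because removing the
 * entry from the inode body invalidates 'entry'; the saved copies are
 * what then gets inserted into the external block.
 */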
2608
2609 static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
2610 struct ext4_inode *raw_inode,
2611 int isize_diff, size_t ifree,
2612 size_t bfree, int *total_ino)
2613 {
2614 struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2615 struct ext4_xattr_entry *small_entry;
2616 struct ext4_xattr_entry *entry;
2617 struct ext4_xattr_entry *last;
2618 unsigned int entry_size; /* EA entry size */
2619 unsigned int total_size; /* EA entry size + value size */
2620 unsigned int min_total_size;
2621 int error;
2622
2623 while (isize_diff > ifree) {
2624 entry = NULL;
2625 small_entry = NULL;
2626 min_total_size = ~0U;
2627 last = IFIRST(header);
2628 /* Find the entry best suited to be pushed into the EA block */
2629 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2630 total_size = EXT4_XATTR_LEN(last->e_name_len);
2631 if (!last->e_value_inum)
2632 total_size += EXT4_XATTR_SIZE(
2633 le32_to_cpu(last->e_value_size));
2634 if (total_size <= bfree &&
2635 total_size < min_total_size) {
2636 if (total_size + ifree < isize_diff) {
2637 small_entry = last;
2638 } else {
2639 entry = last;
2640 min_total_size = total_size;
2641 }
2642 }
2643 }
2644
2645 if (entry == NULL) {
2646 if (small_entry == NULL)
2647 return -ENOSPC;
2648 entry = small_entry;
2649 }
2650
2651 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
2652 total_size = entry_size;
2653 if (!entry->e_value_inum)
2654 total_size += EXT4_XATTR_SIZE(
2655 le32_to_cpu(entry->e_value_size));
2656 error = ext4_xattr_move_to_block(handle, inode, raw_inode,
2657 entry);
2658 if (error)
2659 return error;
2660
2661 *total_ino -= entry_size;
2662 ifree += total_size;
2663 bfree -= total_size;
2664 }
2665
2666 return 0;
2667 }
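/*
 * Selection example (illustrative): with isize_diff == 72, ifree == 32,
 * bfree == 128 and in-inode entries of total_size 32, 64 and 96, the
 * 32-byte entry cannot close the deficit by itself and is only noted as
 * small_entry; 64 is the smallest entry that can, so it becomes 'entry'
 * and is moved first, after which ifree == 96 >= isize_diff and the
 * loop terminates.
 */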
2668
2669 /*
2670 * Expand an inode by new_extra_isize bytes when EAs are present.
2671 * Returns 0 on success or negative error number on failure.
2672 */
2673 int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
2674 struct ext4_inode *raw_inode, handle_t *handle)
2675 {
2676 struct ext4_xattr_ibody_header *header;
2677 struct buffer_head *bh;
2678 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2679 static unsigned int mnt_count;
2680 size_t min_offs;
2681 size_t ifree, bfree;
2682 int total_ino;
2683 void *base, *end;
2684 int error = 0, tried_min_extra_isize = 0;
2685 int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize);
2686 int isize_diff; /* How much do we need to grow i_extra_isize */
2687
2688 retry:
2689 isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
2690 if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
2691 return 0;
2692
2693 header = IHDR(inode, raw_inode);
2694
2695 /*
2696 * Check if enough free space is available in the inode to shift the
2697 * entries ahead by isize_diff bytes.
2698 */
2699
2700 base = IFIRST(header);
2701 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2702 min_offs = end - base;
2703 total_ino = sizeof(struct ext4_xattr_ibody_header);
2704
2705 error = xattr_check_inode(inode, header, end);
2706 if (error)
2707 goto cleanup;
2708
2709 ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
2710 if (ifree >= isize_diff)
2711 goto shift;
2712
2713 /*
2714 * There is not enough free space in the inode; check whether the
2715 * EA block can absorb enough entries to free up isize_diff bytes.
2716 */
2717 if (EXT4_I(inode)->i_file_acl) {
2718 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
2719 error = -EIO;
2720 if (!bh)
2721 goto cleanup;
2722 if (ext4_xattr_check_block(inode, bh)) {
2723 EXT4_ERROR_INODE(inode, "bad block %llu",
2724 EXT4_I(inode)->i_file_acl);
2725 error = -EFSCORRUPTED;
2726 brelse(bh);
2727 goto cleanup;
2728 }
2729 base = BHDR(bh);
2730 end = bh->b_data + bh->b_size;
2731 min_offs = end - base;
2732 bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
2733 NULL);
2734 brelse(bh);
2735 if (bfree + ifree < isize_diff) {
2736 if (!tried_min_extra_isize && s_min_extra_isize) {
2737 tried_min_extra_isize++;
2738 new_extra_isize = s_min_extra_isize;
2739 goto retry;
2740 }
2741 error = -ENOSPC;
2742 goto cleanup;
2743 }
2744 } else {
2745 bfree = inode->i_sb->s_blocksize;
2746 }
2747
2748 error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
2749 isize_diff, ifree, bfree,
2750 &total_ino);
2751 if (error) {
2752 if (error == -ENOSPC && !tried_min_extra_isize &&
2753 s_min_extra_isize) {
2754 tried_min_extra_isize++;
2755 new_extra_isize = s_min_extra_isize;
2756 goto retry;
2757 }
2758 goto cleanup;
2759 }
2760 shift:
2761 /* Adjust the offsets and shift the remaining entries ahead */
2762 ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
2763 - new_extra_isize, (void *)raw_inode +
2764 EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
2765 (void *)header, total_ino);
2766 EXT4_I(inode)->i_extra_isize = new_extra_isize;
2767
2768 cleanup:
2769 if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
2770 ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
2771 inode->i_ino);
2772 mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count);
2773 }
2774 return error;
2775 }
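/*
 * Note: if even pushing entries out to the EA block cannot free
 * isize_diff bytes, the function falls back once to the
 * s_min_extra_isize target from the superblock before giving up
 * with -ENOSPC.
 */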
2776
2777 #define EIA_INCR 16 /* must be 2^n */
2778 #define EIA_MASK (EIA_INCR - 1)
2779
2780 /* Add the large xattr @inode into @ea_inode_array for deferred iput().
2781 * If @ea_inode_array is new or full it will be grown and the old
2782 * contents copied over.
2783 */
2784 static int
2785 ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
2786 struct inode *inode)
2787 {
2788 if (*ea_inode_array == NULL) {
2789 /*
2790 * Start with 15 inodes, so that the allocation (sized via
2791 * offsetof() below) comes out as a power of two.
2792 */
2793 (*ea_inode_array) =
2794 kmalloc(offsetof(struct ext4_xattr_inode_array,
2795 inodes[EIA_MASK]),
2796 GFP_NOFS);
2797 if (*ea_inode_array == NULL)
2798 return -ENOMEM;
2799 (*ea_inode_array)->count = 0;
2800 } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
2801 /* expand the array once all 15 + n * 16 slots are full */
2802 struct ext4_xattr_inode_array *new_array = NULL;
2803 int count = (*ea_inode_array)->count;
2804
2805 /* as above, the allocation is sized via offsetof() */
2806 new_array = kmalloc(
2807 offsetof(struct ext4_xattr_inode_array,
2808 inodes[count + EIA_INCR]),
2809 GFP_NOFS);
2810 if (new_array == NULL)
2811 return -ENOMEM;
2812 memcpy(new_array, *ea_inode_array,
2813 offsetof(struct ext4_xattr_inode_array, inodes[count]));
2814 kfree(*ea_inode_array);
2815 *ea_inode_array = new_array;
2816 }
2817 (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
2818 return 0;
2819 }
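/*
 * Sizing sketch (illustrative; assumes 64-bit pointers and the usual
 * layout of struct ext4_xattr_inode_array): the first allocation is
 * offsetof(..., inodes[15]), i.e. 8 bytes of header and padding plus
 * 15 * 8 = 120 bytes of pointers, which lands exactly on a 128-byte
 * power-of-two kmalloc bucket; each later grow adds EIA_INCR == 16
 * pointer slots.
 */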
2820
2821 /*
2822 * ext4_xattr_delete_inode()
2823 *
2824 * Free extended attribute resources associated with this inode. Traverse
2825 * all entries and decrement the reference count on any xattr inodes
2826 * associated with this inode. This is called immediately before an inode
2827 * is freed; we have exclusive access to the inode. Deleting an orphan
2828 * inode also releases its references on the xattr block and xattr inodes.
2829 */
2830 int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2831 struct ext4_xattr_inode_array **ea_inode_array,
2832 int extra_credits)
2833 {
2834 struct buffer_head *bh = NULL;
2835 struct ext4_xattr_ibody_header *header;
2836 struct ext4_iloc iloc = { .bh = NULL };
2837 struct ext4_xattr_entry *entry;
2838 struct inode *ea_inode;
2839 int error;
2840
2841 error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
2842 NULL /* bh */,
2843 false /* dirty */,
2844 false /* block_csum */);
2845 if (error) {
2846 EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
2847 goto cleanup;
2848 }
2849
2850 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2851 ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2852
2853 error = ext4_get_inode_loc(inode, &iloc);
2854 if (error) {
2855 EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
2856 goto cleanup;
2857 }
2858
2859 error = ext4_journal_get_write_access(handle, iloc.bh);
2860 if (error) {
2861 EXT4_ERROR_INODE(inode, "write access (error %d)",
2862 error);
2863 goto cleanup;
2864 }
2865
2866 header = IHDR(inode, ext4_raw_inode(&iloc));
2867 if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2868 ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
2869 IFIRST(header),
2870 false /* block_csum */,
2871 ea_inode_array,
2872 extra_credits,
2873 false /* skip_quota */);
2874 }
2875
2876 if (EXT4_I(inode)->i_file_acl) {
2877 bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
2878 if (!bh) {
2879 EXT4_ERROR_INODE(inode, "block %llu read error",
2880 EXT4_I(inode)->i_file_acl);
2881 error = -EIO;
2882 goto cleanup;
2883 }
2884 error = ext4_xattr_check_block(inode, bh);
2885 if (error) {
2886 EXT4_ERROR_INODE(inode, "bad block %llu (error %d)",
2887 EXT4_I(inode)->i_file_acl, error);
2888 goto cleanup;
2889 }
2890
2891 if (ext4_has_feature_ea_inode(inode->i_sb)) {
2892 for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
2893 entry = EXT4_XATTR_NEXT(entry)) {
2894 if (!entry->e_value_inum)
2895 continue;
2896 error = ext4_xattr_inode_iget(inode,
2897 le32_to_cpu(entry->e_value_inum),
2898 le32_to_cpu(entry->e_hash),
2899 &ea_inode);
2900 if (error)
2901 continue;
2902 ext4_xattr_inode_free_quota(inode, ea_inode,
2903 le32_to_cpu(entry->e_value_size));
2904 iput(ea_inode);
2905 }
2906
2907 }
2908
2909 ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
2910 extra_credits);
2911 /*
2912 * Update i_file_acl value in the same transaction that releases
2913 * block.
2914 */
2915 EXT4_I(inode)->i_file_acl = 0;
2916 error = ext4_mark_inode_dirty(handle, inode);
2917 if (error) {
2918 EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
2919 error);
2920 goto cleanup;
2921 }
2922 }
2923 error = 0;
2924 cleanup:
2925 brelse(iloc.bh);
2926 brelse(bh);
2927 return error;
2928 }
2929
2930 void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
2931 {
2932 int idx;
2933
2934 if (ea_inode_array == NULL)
2935 return;
2936
2937 for (idx = 0; idx < ea_inode_array->count; ++idx)
2938 iput(ea_inode_array->inodes[idx]);
2939 kfree(ea_inode_array);
2940 }
2941
2942 /*
2943 * ext4_xattr_block_cache_insert()
2944 *
2945 * Create a new entry in the extended attribute block cache, and insert
2946 * it unless such an entry is already in the cache. Note that the
2947 * function returns void: insertion failures other than -EBUSY (entry
2948 * already cached) are silently ignored.
2949 */
2950 static void
2951 ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
2952 struct buffer_head *bh)
2953 {
2954 struct ext4_xattr_header *header = BHDR(bh);
2955 __u32 hash = le32_to_cpu(header->h_hash);
2956 int reusable = le32_to_cpu(header->h_refcount) <
2957 EXT4_XATTR_REFCOUNT_MAX;
2958 int error;
2959
2960 if (!ea_block_cache)
2961 return;
2962 error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
2963 bh->b_blocknr, reusable);
2964 if (error) {
2965 if (error == -EBUSY)
2966 ea_bdebug(bh, "already in cache");
2967 } else
2968 ea_bdebug(bh, "inserting [%x]", (int)hash);
2969 }
2970
2971 /*
2972 * ext4_xattr_cmp()
2973 *
2974 * Compare two extended attribute blocks for equality.
2975 *
2976 * Returns 0 if the blocks are equal and 1 if they differ; it cannot
2977 * return an error.
2978 */
2979 static int
2980 ext4_xattr_cmp(struct ext4_xattr_header *header1,
2981 struct ext4_xattr_header *header2)
2982 {
2983 struct ext4_xattr_entry *entry1, *entry2;
2984
2985 entry1 = ENTRY(header1+1);
2986 entry2 = ENTRY(header2+1);
2987 while (!IS_LAST_ENTRY(entry1)) {
2988 if (IS_LAST_ENTRY(entry2))
2989 return 1;
2990 if (entry1->e_hash != entry2->e_hash ||
2991 entry1->e_name_index != entry2->e_name_index ||
2992 entry1->e_name_len != entry2->e_name_len ||
2993 entry1->e_value_size != entry2->e_value_size ||
2994 entry1->e_value_inum != entry2->e_value_inum ||
2995 memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
2996 return 1;
2997 if (!entry1->e_value_inum &&
2998 memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
2999 (char *)header2 + le16_to_cpu(entry2->e_value_offs),
3000 le32_to_cpu(entry1->e_value_size)))
3001 return 1;
3002
3003 entry1 = EXT4_XATTR_NEXT(entry1);
3004 entry2 = EXT4_XATTR_NEXT(entry2);
3005 }
3006 if (!IS_LAST_ENTRY(entry2))
3007 return 1;
3008 return 0;
3009 }
3010
3011 /*
3012 * ext4_xattr_block_cache_find()
3013 *
3014 * Find an identical extended attribute block.
3015 *
3016 * Returns a pointer to the block found, or NULL if such a block was
3017 * not found or an error occurred.
3018 */
3019 static struct buffer_head *
3020 ext4_xattr_block_cache_find(struct inode *inode,
3021 struct ext4_xattr_header *header,
3022 struct mb_cache_entry **pce)
3023 {
3024 __u32 hash = le32_to_cpu(header->h_hash);
3025 struct mb_cache_entry *ce;
3026 struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
3027
3028 if (!ea_block_cache)
3029 return NULL;
3030 if (!header->h_hash)
3031 return NULL; /* never share */
3032 ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
3033 ce = mb_cache_entry_find_first(ea_block_cache, hash);
3034 while (ce) {
3035 struct buffer_head *bh;
3036
3037 bh = sb_bread(inode->i_sb, ce->e_value);
3038 if (!bh) {
3039 EXT4_ERROR_INODE(inode, "block %lu read error",
3040 (unsigned long)ce->e_value);
3041 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
3042 *pce = ce;
3043 return bh;
3044 }
3045 brelse(bh);
3046 ce = mb_cache_entry_find_next(ea_block_cache, ce);
3047 }
3048 return NULL;
3049 }
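/*
 * Note: a matching h_hash alone is never trusted; every candidate block
 * is read and fully compared with ext4_xattr_cmp() before it is handed
 * back for sharing.
 */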
3050
3051 #define NAME_HASH_SHIFT 5
3052 #define VALUE_HASH_SHIFT 16
3053
3054 /*
3055 * ext4_xattr_hash_entry()
3056 *
3057 * Compute the hash of an extended attribute.
3058 */
3059 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
3060 size_t value_count)
3061 {
3062 __u32 hash = 0;
3063
3064 while (name_len--) {
3065 hash = (hash << NAME_HASH_SHIFT) ^
3066 (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
3067 *name++;
3068 }
3069 while (value_count--) {
3070 hash = (hash << VALUE_HASH_SHIFT) ^
3071 (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
3072 le32_to_cpu(*value++);
3073 }
3074 return cpu_to_le32(hash);
3075 }
3076
3077 #undef NAME_HASH_SHIFT
3078 #undef VALUE_HASH_SHIFT
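/*
 * Standalone sketch of the name half of this hash (illustrative;
 * name_hash() is a hypothetical userspace helper, not a kernel API):
 *
 *	#include <stdint.h>
 *	#include <string.h>
 *
 *	static uint32_t name_hash(const char *name)
 *	{
 *		uint32_t hash = 0;
 *		size_t len = strlen(name);
 *
 *		while (len--)	// 5-bit rotate (27 == 32 - 5), then fold in a byte
 *			hash = (hash << 5) ^ (hash >> 27) ^ *name++;
 *		return hash;
 *	}
 *
 * The value words are folded in the same way, using a 16-bit rotate.
 */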
3079
3080 #define BLOCK_HASH_SHIFT 16
3081
3082 /*
3083 * ext4_xattr_rehash()
3084 *
3085 * Re-compute the extended attribute block's hash after an entry has changed.
3086 */
3087 static void ext4_xattr_rehash(struct ext4_xattr_header *header)
3088 {
3089 struct ext4_xattr_entry *here;
3090 __u32 hash = 0;
3091
3092 here = ENTRY(header+1);
3093 while (!IS_LAST_ENTRY(here)) {
3094 if (!here->e_hash) {
3095 /* Block is not shared if an entry's hash value == 0 */
3096 hash = 0;
3097 break;
3098 }
3099 hash = (hash << BLOCK_HASH_SHIFT) ^
3100 (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
3101 le32_to_cpu(here->e_hash);
3102 here = EXT4_XATTR_NEXT(here);
3103 }
3104 header->h_hash = cpu_to_le32(hash);
3105 }
3106
3107 #undef BLOCK_HASH_SHIFT
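/*
 * Loop expansion (illustrative): for a block with two entries whose
 * e_hash values are A and B, the loop above computes
 * h_hash = rol32(A, 16) ^ B; any entry with e_hash == 0 instead forces
 * h_hash = 0, which marks the block as unshareable.
 */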
3108
3109 #define HASH_BUCKET_BITS 10
3110
3111 struct mb_cache *
3112 ext4_xattr_create_cache(void)
3113 {
3114 return mb_cache_create(HASH_BUCKET_BITS);
3115 }
3116
3117 void ext4_xattr_destroy_cache(struct mb_cache *cache)
3118 {
3119 if (cache)
3120 mb_cache_destroy(cache);
3121 }
3122