fs/gfs2/xattr.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "xattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 * @sdp: the filesystem superblock
 * @nsize: the length of the attribute name
 * @dsize: the length of the attribute data
 * @size: the resulting on-disk size of the extended attribute
 *
 * Returns: 1 if the EA should be stuffed
 */

static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
                        unsigned int *size)
{
        unsigned int jbsize = sdp->sd_jbsize;

        /* Stuffed */
        *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);

        if (*size <= jbsize)
                return 1;

        /* Unstuffed */
        *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
                      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);

        return 0;
}
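
/*
 * Worked example (illustrative, assuming a 4 KiB block size, so sd_jbsize
 * is the block size minus sizeof(struct gfs2_meta_header), and assuming
 * the 12-byte on-disk gfs2_ea_header): a 10-byte name with 100 bytes of
 * data needs ALIGN(12 + 10 + 100, 8) = 128 bytes, which fits in one
 * journaled block, so the EA is stuffed.  Larger data is held in separate
 * blocks instead, and the in-record size then covers only the header, the
 * name, and one 64-bit block pointer per sd_jbsize-sized chunk of data.
 */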

static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
{
        unsigned int size;

        if (dsize > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, nsize, dsize, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

static bool gfs2_eatype_valid(struct gfs2_sbd *sdp, u8 type)
{
        switch (sdp->sd_sb.sb_fs_format) {
        case GFS2_FS_FORMAT_MAX:
                return true;

        case GFS2_FS_FORMAT_MIN:
                return type <= GFS2_EATYPE_SECURITY;

        default:
                return false;
        }
}

typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
                                                  bh->b_data + bh->b_size))
                        goto fail;
                if (!gfs2_eatype_valid(sdp, ea->ea_type))
                        goto fail;
                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

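/*
 * An inode's extended attributes live either in a single EA block or,
 * once that block fills up (GFS2_DIF_EA_INDIRECT), behind an indirect
 * block holding up to sd_inptrs pointers to EA blocks.  ea_foreach()
 * hides that difference and invokes @ea_call on every EA record found.
 */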
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        __be64 *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);
        return error;
}

struct ea_find {
        int type;
        const char *name;
        size_t namel;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == ef->type) {
                if (ea->ea_name_len == ef->namel &&
                    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }

        return 0;
}

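/*
 * Callbacks passed to ea_foreach() return a negative errno to abort the
 * walk, 0 to keep going, or a positive value to stop early; ea_find_i()
 * uses the positive return to signal "found", which gfs2_ea_find() then
 * translates back into success.
 */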
static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
                        struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.type = type;
        ef.name = name;
        ef.namel = strlen(name);
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}

/**
 * ea_dealloc_unstuffed - free the data blocks pointed to by an unstuffed EA
 * @ip: the inode the extended attribute belongs to
 * @bh: the buffer head of the EA block containing the record
 * @ea: the extended attribute header
 * @prev: the previous extended attribute header in the block, if any
 * @private: if non-NULL, leave the emptied record in place (marked unused)
 *
 * This takes advantage of the fact that all unstuffed blocks are
 * allocated from the same resource group, although that assumption
 * may not always hold.
 *
 * Returns: errno
 */

static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        __be64 *dataptrs;
        u64 bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn, 1);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
                                   LM_FLAG_NODE_SCOPE, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
                                 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_meta(ip->i_gl, bh);

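        /*
         * Free the data blocks as extents: consecutive block numbers are
         * accumulated into a (bstart, blen) run so that gfs2_free_meta()
         * is called once per contiguous extent rather than once per block.
         */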
        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, rgd, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, rgd, bstart, blen);

        if (prev && !leave) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);
        return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (error)
                goto out_alloc;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

        gfs2_quota_unhold(ip);
out_alloc:
        return error;
}

struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

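/*
 * ea_list_i() serves two passes over the same records: when the request
 * carries no buffer (er_data_len == 0), it only accumulates the number of
 * bytes a listing would need; when a buffer is supplied, it also copies
 * out each "prefix.name" entry, NUL-terminated.
 */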
static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size;
        char *prefix;
        unsigned int l;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        BUG_ON(ea->ea_type > GFS2_EATYPE_SECURITY &&
               sdp->sd_sb.sb_fs_format == GFS2_FS_FORMAT_MIN);
        switch (ea->ea_type) {
        case GFS2_EATYPE_USR:
                prefix = "user.";
                l = 5;
                break;
        case GFS2_EATYPE_SYS:
                prefix = "system.";
                l = 7;
                break;
        case GFS2_EATYPE_SECURITY:
                prefix = "security.";
                l = 9;
                break;
        case GFS2_EATYPE_TRUSTED:
                prefix = "trusted.";
                l = 8;
                break;
        default:
                return 0;
        }

        ea_size = l + ea->ea_name_len + 1;
        if (er->er_data_len) {
                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                er->er_data[ei->ei_size + ea_size - 1] = 0;
        }

        ei->ei_size += ea_size;

        return 0;
}

/**
 * gfs2_listxattr - List gfs2 extended attributes
 * @dentry: The dentry whose inode we are interested in
 * @buffer: The buffer to write the results into
 * @size: The size of the buffer
 *
 * Returns: actual size of data on success, -errno on error
 */

ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
        struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
        struct gfs2_ea_request er;
        struct gfs2_holder i_gh;
        int error;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        if (size) {
                er.er_data = buffer;
                er.er_data_len = size;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        if (ip->i_eattr) {
                struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 *                       request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @din: The data to be copied in
 * @dout: The data to be copied out (one of din,dout will be NULL)
 *
 * Returns: errno
 */

static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                               const char *din, char *dout)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;
        unsigned char *pos;
        unsigned cp_size;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

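        /*
         * The reads above were issued without DIO_WAIT, so all nptrs block
         * reads are in flight at once; only now do we wait for each buffer
         * and copy the data, one sd_jbsize-sized chunk per block.
         */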
        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
                cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

                if (dout) {
                        memcpy(dout, pos, cp_size);
                        dout += sdp->sd_jbsize;
                }

                if (din) {
                        gfs2_trans_add_meta(ip->i_gl, bh[x]);
                        memcpy(pos, din, cp_size);
                        din += sdp->sd_jbsize;
                }

                amount -= sdp->sd_jbsize;
                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;
}

static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                            char *data, size_t size)
{
        int ret;
        size_t len = GFS2_EA_DATA_LEN(el->el_ea);

        if (len > size)
                return -ERANGE;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data, GFS2_EA2DATA(el->el_ea), len);
                return len;
        }
        ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
        if (ret < 0)
                return ret;
        return len;
}

int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
        struct gfs2_ea_location el;
        int error;
        int len;
        char *data;

        error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                goto out;
        if (!GFS2_EA_DATA_LEN(el.el_ea))
                goto out;

        len = GFS2_EA_DATA_LEN(el.el_ea);
        data = kmalloc(len, GFP_NOFS);
        error = -ENOMEM;
        if (data == NULL)
                goto out;

        error = gfs2_ea_get_copy(ip, &el, data, len);
        if (error < 0)
                kfree(data);
        else
                *ppdata = data;
out:
        brelse(el.el_bh);
        return error;
}

/**
 * __gfs2_xattr_get - Get a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @buffer: The buffer to write the result into
 * @size: The size of the buffer
 * @type: The type of extended attribute
 *
 * Returns: actual size of data on success, -errno on error
 */
static int __gfs2_xattr_get(struct inode *inode, const char *name,
                            void *buffer, size_t size, int type)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;
        if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;
        if (size)
                error = gfs2_ea_get_copy(ip, &el, buffer, size);
        else
                error = GFS2_EA_DATA_LEN(el.el_ea);
        brelse(el.el_bh);

        return error;
}

static int gfs2_xattr_get(const struct xattr_handler *handler,
                          struct dentry *unused, struct inode *inode,
                          const char *name, void *buffer, size_t size)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        /* During lookup, SELinux calls this function with the glock locked. */

        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
                if (ret)
                        return ret;
        } else {
                gfs2_holder_mark_uninitialized(&gh);
        }
        ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
        if (gfs2_holder_initialized(&gh))
                gfs2_glock_dq_uninit(&gh);
        return ret;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */

static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea;
        unsigned int n = 1;
        u64 block;
        int error;

        error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
        if (error)
                return error;
        gfs2_trans_remove_revoke(sdp, block, 1);
        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_meta(ip->i_gl, *bhp);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        gfs2_add_inode_blocks(&ip->i_inode, 1);

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */

static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;
        ea->__pad = 0;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        u64 block;
                        int mh_size = sizeof(struct gfs2_meta_header);
                        unsigned int n = 1;

                        error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
                        if (error)
                                return error;
                        gfs2_trans_remove_revoke(sdp, block, 1);
                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_meta(ip->i_gl, bh);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

                        gfs2_add_inode_blocks(&ip->i_inode, 1);

                        copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
                                                           data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64(bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er, void *private);

static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
{
        struct gfs2_alloc_parms ap = { .target = blks };
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_lock_check(ip, &ap);
        if (error)
                return error;

        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 blks + gfs2_rg_blocks(ip, blks) +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

out_end_trans:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
        return error;
}
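
/*
 * Note the fixed ordering in ea_alloc_skeleton(): quota lock, then block
 * reservation, then transaction, with the error path unwinding in exactly
 * the reverse order.  Every allocating EA operation below funnels through
 * this helper with a callback that does the actual work.
 */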

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 * @ip: the inode the new eattr block belongs to
 * @type: the type of the extended attribute
 * @name: the name of the extended attribute
 * @data: the value of the extended attribute
 * @size: the length of @data
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, int type, const char *name,
                   const void *data, size_t size)
{
        struct gfs2_ea_request er;
        unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
        unsigned int blks = 1;

        er.er_type = type;
        er.er_name = name;
        er.er_name_len = strlen(name);
        er.er_data = (void *)data;
        er.er_data_len = size;

        if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
                blks += DIV_ROUND_UP(er.er_data_len, jbsize);

        return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}

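/*
 * An EA record's rec_len may be larger than the space its header, name and
 * data actually occupy.  ea_split_ea() carves that slack off into a new
 * record immediately after @ea, shrinking @ea's rec_len to its used size
 * and transferring the LAST flag to the new record if @ea carried it.
 */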
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        u32 ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        u32 len;

        gfs2_trans_add_meta(ip->i_gl, el->el_bh);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_meta(ip->i_gl, bh);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_meta(ip->i_gl, es->es_bh);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

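/*
 * ea_set_simple() runs once per existing record via ea_foreach(): it
 * returns 0 if the record cannot hold the new EA (keep scanning), 1 once
 * the EA has been written into a reusable or splittable record, or a
 * negative errno.  If no record fits, the caller falls back to
 * ea_set_block() to allocate fresh space.
 */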
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                         void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
                               es->es_er->er_data_len, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        GFS2_SB(&ip->i_inode)->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

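/*
 * ea_set_block() adds the new EA in a freshly allocated EA block.  The
 * first time this happens, the inode's single EA block is pushed down
 * behind a new indirect block (GFS2_DIF_EA_INDIRECT); afterwards, new EA
 * blocks are linked into the next free slot of that indirect block.
 */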
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
        __be64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                __be64 *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
                                       &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (__be64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_meta(ip->i_gl, indbh);
        } else {
                u64 blk;
                unsigned int n = 1;
                error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
                if (error)
                        return error;
                gfs2_trans_remove_revoke(sdp, blk, 1);
                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_meta(ip->i_gl, indbh);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (__be64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_eattr);
                ip->i_eattr = blk;
                ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
                gfs2_add_inode_blocks(&ip->i_inode, 1);

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((u64)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, private);

out:
        brelse(indbh);
        return error;
}

static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
                    const void *value, size_t size, struct gfs2_ea_location *el)
{
        struct gfs2_ea_request er;
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        er.er_type = type;
        er.er_name = name;
        er.er_data = (void *)value;
        er.er_name_len = strlen(name);
        er.er_data_len = size;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = &er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
                blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

        return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_meta(ip->i_gl, el->el_bh);

        if (prev) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
        }

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

/**
 * gfs2_xattr_remove - Remove a GFS2 extended attribute
 * @ip: The inode
 * @type: The type of the extended attribute
 * @name: The name of the extended attribute
 *
 * This is not called directly by the VFS since we use the (common)
 * scheme of making a "set with NULL data" mean a remove request. Note
 * that this is different from a set with zero length data.
 *
 * Returns: 0, or errno on failure
 */

static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);

        brelse(el.el_bh);

        return error;
}

/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 or errno on failure
 */

int __gfs2_xattr_set(struct inode *inode, const char *name,
                     const void *value, size_t size, int flags, int type)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_ea_location el;
        unsigned int namel = strlen(name);
        int error;

        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
                return -EPERM;
        if (namel > GFS2_EA_MAX_NAME_LEN)
                return -ERANGE;

        if (value == NULL) {
                error = gfs2_xattr_remove(ip, type, name);
                if (error == -ENODATA && !(flags & XATTR_REPLACE))
                        error = 0;
                return error;
        }

        if (ea_check_size(sdp, namel, size))
                return -ERANGE;

        if (!ip->i_eattr) {
                if (flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, type, name, value, size);
        }

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, type, name, value, size, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
                return error;
        }

        error = -ENODATA;
        if (!(flags & XATTR_REPLACE))
                error = ea_set_i(ip, type, name, value, size, NULL);

        return error;
}

static int gfs2_xattr_set(const struct xattr_handler *handler,
                          struct user_namespace *mnt_userns,
                          struct dentry *unused, struct inode *inode,
                          const char *name, const void *value,
                          size_t size, int flags)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        ret = gfs2_qa_get(ip);
        if (ret)
                return ret;

        /* May be called from gfs_setattr with the glock locked. */

        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret)
                        goto out;
        } else {
                if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE)) {
                        ret = -EIO;
                        goto out;
                }
                gfs2_holder_mark_uninitialized(&gh);
        }
        ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
        if (gfs2_holder_initialized(&gh))
                gfs2_glock_dq_uninit(&gh);
out:
        gfs2_qa_put(ip);
        return ret;
}

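/*
 * ea_dealloc_indirect() frees the EA blocks behind an indirect block in
 * two passes: the first walk collects the affected resource groups (and
 * counts the blocks) so their glocks can be acquired and a large enough
 * transaction started, and only the second walk actually frees the
 * blocks and clears the pointers.
 */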
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct gfs2_rgrpd *rgd;
        struct buffer_head *indbh, *dibh;
        __be64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(ip, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(ip, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
                rg_blocks += rgd->rd_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
                                 RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_meta(ip->i_gl, indbh);

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        rgd = NULL;
        blen = 0;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, rgd, bstart, blen);
                        bstart = bn;
                        rgd = gfs2_blk2rgrpd(sdp, bstart, true);
                        blen = 1;
                }

                *eablk = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, rgd, bstart, blen);

        ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_meta(ip->i_gl, dibh);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
        gfs2_rlist_free(&rlist);
out:
        brelse(indbh);
        return error;
}

static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
                                   LM_FLAG_NODE_SCOPE, &gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
                                 RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, rgd, ip->i_eattr, 1);

        ip->i_eattr = 0;
        gfs2_add_inode_blocks(&ip->i_inode, -1);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_meta(ip->i_gl, dibh);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&gh);
        return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (error)
                return error;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_quota;

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_quota;
        }

        error = ea_dealloc_block(ip);

out_quota:
        gfs2_quota_unhold(ip);
        return error;
}

static const struct xattr_handler gfs2_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags = GFS2_EATYPE_USR,
        .get = gfs2_xattr_get,
        .set = gfs2_xattr_set,
};

static const struct xattr_handler gfs2_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .flags = GFS2_EATYPE_SECURITY,
        .get = gfs2_xattr_get,
        .set = gfs2_xattr_set,
};

static bool
gfs2_xattr_trusted_list(struct dentry *dentry)
{
        return capable(CAP_SYS_ADMIN);
}

static const struct xattr_handler gfs2_xattr_trusted_handler = {
        .prefix = XATTR_TRUSTED_PREFIX,
        .flags = GFS2_EATYPE_TRUSTED,
        .list = gfs2_xattr_trusted_list,
        .get = gfs2_xattr_get,
        .set = gfs2_xattr_set,
};

const struct xattr_handler *gfs2_xattr_handlers_max[] = {
        /* GFS2_FS_FORMAT_MAX */
        &gfs2_xattr_trusted_handler,

        /* GFS2_FS_FORMAT_MIN */
        &gfs2_xattr_user_handler,
        &gfs2_xattr_security_handler,
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
        NULL,
};

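/*
 * The handler tables for the two on-disk formats share storage: the "min"
 * format table starts one entry into the "max" table, skipping only the
 * trusted-xattr handler that newer-format filesystems add.
 */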
const struct xattr_handler **gfs2_xattr_handlers_min = gfs2_xattr_handlers_max + 1;