/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License v.2.
 */
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/spinlock.h>
13 #include <linux/completion.h>
14 #include <linux/buffer_head.h>
15 #include <linux/xattr.h>
16 #include <asm/semaphore.h>
17 #include <asm/uaccess.h>
31 * ea_calc_size - returns the acutal number of bytes the request will take up
32 * (not counting any unstuffed data blocks)
37 * Returns: 1 if the EA should be stuffed
40 static int ea_calc_size(struct gfs2_sbd
*sdp
, struct gfs2_ea_request
*er
,
43 *size
= GFS2_EAREQ_SIZE_STUFFED(er
);
44 if (*size
<= sdp
->sd_jbsize
)
47 *size
= GFS2_EAREQ_SIZE_UNSTUFFED(sdp
, er
);
52 static int ea_check_size(struct gfs2_sbd
*sdp
, struct gfs2_ea_request
*er
)
56 if (er
->er_data_len
> GFS2_EA_MAX_DATA_LEN
)
59 ea_calc_size(sdp
, er
, &size
);
61 /* This can only happen with 512 byte blocks */
62 if (size
> sdp
->sd_jbsize
)
/* Callback invoked by ea_foreach()/ea_foreach_i() for every EA record.
   Return 0 to continue iterating, >0 to stop successfully, <0 on error. */
typedef int (*ea_call_t) (struct gfs2_inode *ip,
			  struct buffer_head *bh,
			  struct gfs2_ea_header *ea,
			  struct gfs2_ea_header *prev,
			  void *private);
74 static int ea_foreach_i(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
75 ea_call_t ea_call
, void *data
)
77 struct gfs2_ea_header
*ea
, *prev
= NULL
;
80 if (gfs2_metatype_check(ip
->i_sbd
, bh
, GFS2_METATYPE_EA
))
83 for (ea
= GFS2_EA_BH2FIRST(bh
);; prev
= ea
, ea
= GFS2_EA2NEXT(ea
)) {
84 if (!GFS2_EA_REC_LEN(ea
))
86 if (!(bh
->b_data
<= (char *)ea
&&
87 (char *)GFS2_EA2NEXT(ea
) <=
88 bh
->b_data
+ bh
->b_size
))
90 if (!GFS2_EATYPE_VALID(ea
->ea_type
))
93 error
= ea_call(ip
, bh
, ea
, prev
, data
);
97 if (GFS2_EA_IS_LAST(ea
)) {
98 if ((char *)GFS2_EA2NEXT(ea
) !=
99 bh
->b_data
+ bh
->b_size
)
108 gfs2_consist_inode(ip
);
112 static int ea_foreach(struct gfs2_inode
*ip
, ea_call_t ea_call
, void *data
)
114 struct buffer_head
*bh
, *eabh
;
115 uint64_t *eablk
, *end
;
118 error
= gfs2_meta_read(ip
->i_gl
, ip
->i_di
.di_eattr
,
119 DIO_START
| DIO_WAIT
, &bh
);
123 if (!(ip
->i_di
.di_flags
& GFS2_DIF_EA_INDIRECT
)) {
124 error
= ea_foreach_i(ip
, bh
, ea_call
, data
);
128 if (gfs2_metatype_check(ip
->i_sbd
, bh
, GFS2_METATYPE_IN
)) {
133 eablk
= (uint64_t *)(bh
->b_data
+ sizeof(struct gfs2_meta_header
));
134 end
= eablk
+ ip
->i_sbd
->sd_inptrs
;
136 for (; eablk
< end
; eablk
++) {
141 bn
= be64_to_cpu(*eablk
);
143 error
= gfs2_meta_read(ip
->i_gl
, bn
, DIO_START
| DIO_WAIT
,
147 error
= ea_foreach_i(ip
, eabh
, ea_call
, data
);
159 struct gfs2_ea_request
*ef_er
;
160 struct gfs2_ea_location
*ef_el
;
163 static int ea_find_i(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
164 struct gfs2_ea_header
*ea
, struct gfs2_ea_header
*prev
,
167 struct ea_find
*ef
= private;
168 struct gfs2_ea_request
*er
= ef
->ef_er
;
170 if (ea
->ea_type
== GFS2_EATYPE_UNUSED
)
173 if (ea
->ea_type
== er
->er_type
) {
174 if (ea
->ea_name_len
== er
->er_name_len
&&
175 !memcmp(GFS2_EA2NAME(ea
), er
->er_name
, ea
->ea_name_len
)) {
176 struct gfs2_ea_location
*el
= ef
->ef_el
;
186 else if ((ip
->i_di
.di_flags
& GFS2_DIF_EA_PACKED
) &&
187 er
->er_type
== GFS2_EATYPE_SYS
)
194 int gfs2_ea_find(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
,
195 struct gfs2_ea_location
*el
)
203 memset(el
, 0, sizeof(struct gfs2_ea_location
));
205 error
= ea_foreach(ip
, ea_find_i
, &ef
);
213 * ea_dealloc_unstuffed -
220 * Take advantage of the fact that all unstuffed blocks are
221 * allocated from the same RG. But watch, this may not always
227 static int ea_dealloc_unstuffed(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
228 struct gfs2_ea_header
*ea
,
229 struct gfs2_ea_header
*prev
, void *private)
231 int *leave
= private;
232 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
233 struct gfs2_rgrpd
*rgd
;
234 struct gfs2_holder rg_gh
;
235 struct buffer_head
*dibh
;
236 uint64_t *dataptrs
, bn
= 0;
238 unsigned int blen
= 0;
239 unsigned int blks
= 0;
243 if (GFS2_EA_IS_STUFFED(ea
))
246 dataptrs
= GFS2_EA2DATAPTRS(ea
);
247 for (x
= 0; x
< ea
->ea_num_ptrs
; x
++, dataptrs
++)
250 bn
= be64_to_cpu(*dataptrs
);
255 rgd
= gfs2_blk2rgrpd(sdp
, bn
);
257 gfs2_consist_inode(ip
);
261 error
= gfs2_glock_nq_init(rgd
->rd_gl
, LM_ST_EXCLUSIVE
, 0, &rg_gh
);
265 error
= gfs2_trans_begin(sdp
, rgd
->rd_ri
.ri_length
+
266 RES_DINODE
+ RES_EATTR
+ RES_STATFS
+
271 gfs2_trans_add_bh(ip
->i_gl
, bh
);
273 dataptrs
= GFS2_EA2DATAPTRS(ea
);
274 for (x
= 0; x
< ea
->ea_num_ptrs
; x
++, dataptrs
++) {
277 bn
= be64_to_cpu(*dataptrs
);
279 if (bstart
+ blen
== bn
)
283 gfs2_free_meta(ip
, bstart
, blen
);
289 if (!ip
->i_di
.di_blocks
)
290 gfs2_consist_inode(ip
);
291 ip
->i_di
.di_blocks
--;
294 gfs2_free_meta(ip
, bstart
, blen
);
296 if (prev
&& !leave
) {
299 len
= GFS2_EA_REC_LEN(prev
) + GFS2_EA_REC_LEN(ea
);
300 prev
->ea_rec_len
= cpu_to_be32(len
);
302 if (GFS2_EA_IS_LAST(ea
))
303 prev
->ea_flags
|= GFS2_EAFLAG_LAST
;
305 ea
->ea_type
= GFS2_EATYPE_UNUSED
;
309 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
311 ip
->i_di
.di_ctime
= get_seconds();
312 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
313 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
320 gfs2_glock_dq_uninit(&rg_gh
);
325 static int ea_remove_unstuffed(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
326 struct gfs2_ea_header
*ea
,
327 struct gfs2_ea_header
*prev
, int leave
)
329 struct gfs2_alloc
*al
;
332 al
= gfs2_alloc_get(ip
);
334 error
= gfs2_quota_hold(ip
, NO_QUOTA_CHANGE
, NO_QUOTA_CHANGE
);
338 error
= gfs2_rindex_hold(ip
->i_sbd
, &al
->al_ri_gh
);
342 error
= ea_dealloc_unstuffed(ip
,
344 (leave
) ? &error
: NULL
);
346 gfs2_glock_dq_uninit(&al
->al_ri_gh
);
349 gfs2_quota_unhold(ip
);
357 /******************************************************************************/
359 static int gfs2_ea_repack_i(struct gfs2_inode
*ip
)
364 int gfs2_ea_repack(struct gfs2_inode
*ip
)
366 struct gfs2_holder gh
;
369 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_EXCLUSIVE
, 0, &gh
);
373 /* Some sort of permissions checking would be nice */
375 error
= gfs2_ea_repack_i(ip
);
377 gfs2_glock_dq_uninit(&gh
);
383 struct gfs2_ea_request
*ei_er
;
384 unsigned int ei_size
;
387 static int ea_list_i(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
388 struct gfs2_ea_header
*ea
, struct gfs2_ea_header
*prev
,
391 struct ea_list
*ei
= private;
392 struct gfs2_ea_request
*er
= ei
->ei_er
;
393 unsigned int ea_size
= GFS2_EA_STRLEN(ea
);
395 if (ea
->ea_type
== GFS2_EATYPE_UNUSED
)
398 if (er
->er_data_len
) {
403 if (ei
->ei_size
+ ea_size
> er
->er_data_len
)
406 if (ea
->ea_type
== GFS2_EATYPE_USR
) {
414 memcpy(er
->er_data
+ ei
->ei_size
,
416 memcpy(er
->er_data
+ ei
->ei_size
+ l
,
419 memcpy(er
->er_data
+ ei
->ei_size
+
424 ei
->ei_size
+= ea_size
;
434 * Returns: actual size of data on success, -errno on error
437 int gfs2_ea_list(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
439 struct gfs2_holder i_gh
;
442 if (!er
->er_data
|| !er
->er_data_len
) {
447 error
= gfs2_glock_nq_init(ip
->i_gl
,
448 LM_ST_SHARED
, LM_FLAG_ANY
,
453 if (ip
->i_di
.di_eattr
) {
454 struct ea_list ei
= { .ei_er
= er
, .ei_size
= 0 };
456 error
= ea_foreach(ip
, ea_list_i
, &ei
);
461 gfs2_glock_dq_uninit(&i_gh
);
467 * ea_get_unstuffed - actually copies the unstuffed data into the
476 static int ea_get_unstuffed(struct gfs2_inode
*ip
, struct gfs2_ea_header
*ea
,
479 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
480 struct buffer_head
**bh
;
481 unsigned int amount
= GFS2_EA_DATA_LEN(ea
);
482 unsigned int nptrs
= DIV_RU(amount
, sdp
->sd_jbsize
);
483 uint64_t *dataptrs
= GFS2_EA2DATAPTRS(ea
);
487 bh
= kcalloc(nptrs
, sizeof(struct buffer_head
*), GFP_KERNEL
);
491 for (x
= 0; x
< nptrs
; x
++) {
492 error
= gfs2_meta_read(ip
->i_gl
, be64_to_cpu(*dataptrs
),
502 for (x
= 0; x
< nptrs
; x
++) {
503 error
= gfs2_meta_reread(sdp
, bh
[x
], DIO_WAIT
);
505 for (; x
< nptrs
; x
++)
509 if (gfs2_metatype_check(sdp
, bh
[x
], GFS2_METATYPE_ED
)) {
510 for (; x
< nptrs
; x
++)
517 bh
[x
]->b_data
+ sizeof(struct gfs2_meta_header
),
518 (sdp
->sd_jbsize
> amount
) ? amount
: sdp
->sd_jbsize
);
520 amount
-= sdp
->sd_jbsize
;
521 data
+= sdp
->sd_jbsize
;
532 int gfs2_ea_get_copy(struct gfs2_inode
*ip
, struct gfs2_ea_location
*el
,
535 if (GFS2_EA_IS_STUFFED(el
->el_ea
)) {
537 GFS2_EA2DATA(el
->el_ea
),
538 GFS2_EA_DATA_LEN(el
->el_ea
));
541 return ea_get_unstuffed(ip
, el
->el_ea
, data
);
549 * Returns: actual size of data on success, -errno on error
552 int gfs2_ea_get_i(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
554 struct gfs2_ea_location el
;
557 if (!ip
->i_di
.di_eattr
)
560 error
= gfs2_ea_find(ip
, er
, &el
);
566 if (er
->er_data_len
) {
567 if (GFS2_EA_DATA_LEN(el
.el_ea
) > er
->er_data_len
)
570 error
= gfs2_ea_get_copy(ip
, &el
, er
->er_data
);
573 error
= GFS2_EA_DATA_LEN(el
.el_ea
);
585 * Returns: actual size of data on success, -errno on error
588 int gfs2_ea_get(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
590 struct gfs2_holder i_gh
;
593 if (!er
->er_name_len
||
594 er
->er_name_len
> GFS2_EA_MAX_NAME_LEN
)
596 if (!er
->er_data
|| !er
->er_data_len
) {
601 error
= gfs2_glock_nq_init(ip
->i_gl
,
602 LM_ST_SHARED
, LM_FLAG_ANY
,
607 error
= gfs2_ea_ops
[er
->er_type
]->eo_get(ip
, er
);
609 gfs2_glock_dq_uninit(&i_gh
);
615 * ea_alloc_blk - allocates a new block for extended attributes.
616 * @ip: A pointer to the inode that's getting extended attributes
622 static int ea_alloc_blk(struct gfs2_inode
*ip
, struct buffer_head
**bhp
)
624 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
625 struct gfs2_ea_header
*ea
;
628 block
= gfs2_alloc_meta(ip
);
630 *bhp
= gfs2_meta_new(ip
->i_gl
, block
);
631 gfs2_trans_add_bh(ip
->i_gl
, *bhp
);
632 gfs2_metatype_set(*bhp
, GFS2_METATYPE_EA
, GFS2_FORMAT_EA
);
633 gfs2_buffer_clear_tail(*bhp
, sizeof(struct gfs2_meta_header
));
635 ea
= GFS2_EA_BH2FIRST(*bhp
);
636 ea
->ea_rec_len
= cpu_to_be32(sdp
->sd_jbsize
);
637 ea
->ea_type
= GFS2_EATYPE_UNUSED
;
638 ea
->ea_flags
= GFS2_EAFLAG_LAST
;
641 ip
->i_di
.di_blocks
++;
647 * ea_write - writes the request info to an ea, creating new blocks if
649 * @ip: inode that is being modified
650 * @ea: the location of the new ea in a block
651 * @er: the write request
653 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bin of ea_flags
658 static int ea_write(struct gfs2_inode
*ip
, struct gfs2_ea_header
*ea
,
659 struct gfs2_ea_request
*er
)
661 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
663 ea
->ea_data_len
= cpu_to_be32(er
->er_data_len
);
664 ea
->ea_name_len
= er
->er_name_len
;
665 ea
->ea_type
= er
->er_type
;
668 memcpy(GFS2_EA2NAME(ea
), er
->er_name
, er
->er_name_len
);
670 if (GFS2_EAREQ_SIZE_STUFFED(er
) <= sdp
->sd_jbsize
) {
672 memcpy(GFS2_EA2DATA(ea
), er
->er_data
, er
->er_data_len
);
674 uint64_t *dataptr
= GFS2_EA2DATAPTRS(ea
);
675 const char *data
= er
->er_data
;
676 unsigned int data_len
= er
->er_data_len
;
680 ea
->ea_num_ptrs
= DIV_RU(er
->er_data_len
, sdp
->sd_jbsize
);
681 for (x
= 0; x
< ea
->ea_num_ptrs
; x
++) {
682 struct buffer_head
*bh
;
684 int mh_size
= sizeof(struct gfs2_meta_header
);
686 block
= gfs2_alloc_meta(ip
);
688 bh
= gfs2_meta_new(ip
->i_gl
, block
);
689 gfs2_trans_add_bh(ip
->i_gl
, bh
);
690 gfs2_metatype_set(bh
, GFS2_METATYPE_ED
, GFS2_FORMAT_ED
);
692 ip
->i_di
.di_blocks
++;
694 copy
= (data_len
> sdp
->sd_jbsize
) ? sdp
->sd_jbsize
:
696 memcpy(bh
->b_data
+ mh_size
, data
, copy
);
697 if (copy
< sdp
->sd_jbsize
)
698 memset(bh
->b_data
+ mh_size
+ copy
, 0,
699 sdp
->sd_jbsize
- copy
);
701 *dataptr
++ = cpu_to_be64((uint64_t)bh
->b_blocknr
);
708 gfs2_assert_withdraw(sdp
, !data_len
);
/* Work callback run by ea_alloc_skeleton() inside its transaction,
   after quota/reservation setup. */
typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
				   struct gfs2_ea_request *er,
				   void *private);
718 static int ea_alloc_skeleton(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
,
720 ea_skeleton_call_t skeleton_call
,
723 struct gfs2_alloc
*al
;
724 struct buffer_head
*dibh
;
727 al
= gfs2_alloc_get(ip
);
729 error
= gfs2_quota_lock(ip
, NO_QUOTA_CHANGE
, NO_QUOTA_CHANGE
);
733 error
= gfs2_quota_check(ip
, ip
->i_di
.di_uid
, ip
->i_di
.di_gid
);
737 al
->al_requested
= blks
;
739 error
= gfs2_inplace_reserve(ip
);
743 error
= gfs2_trans_begin(ip
->i_sbd
,
744 blks
+ al
->al_rgd
->rd_ri
.ri_length
+
745 RES_DINODE
+ RES_STATFS
+ RES_QUOTA
, 0);
749 error
= skeleton_call(ip
, er
, private);
753 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
755 if (er
->er_flags
& GFS2_ERF_MODE
) {
756 gfs2_assert_withdraw(ip
->i_sbd
,
757 (ip
->i_di
.di_mode
& S_IFMT
) ==
758 (er
->er_mode
& S_IFMT
));
759 ip
->i_di
.di_mode
= er
->er_mode
;
761 ip
->i_di
.di_ctime
= get_seconds();
762 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
763 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
768 gfs2_trans_end(ip
->i_sbd
);
771 gfs2_inplace_release(ip
);
774 gfs2_quota_unlock(ip
);
782 static int ea_init_i(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
,
785 struct buffer_head
*bh
;
788 error
= ea_alloc_blk(ip
, &bh
);
792 ip
->i_di
.di_eattr
= bh
->b_blocknr
;
793 error
= ea_write(ip
, GFS2_EA_BH2FIRST(bh
), er
);
801 * ea_init - initializes a new eattr block
808 static int ea_init(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
810 unsigned int jbsize
= ip
->i_sbd
->sd_jbsize
;
811 unsigned int blks
= 1;
813 if (GFS2_EAREQ_SIZE_STUFFED(er
) > jbsize
)
814 blks
+= DIV_RU(er
->er_data_len
, jbsize
);
816 return ea_alloc_skeleton(ip
, er
, blks
, ea_init_i
, NULL
);
819 static struct gfs2_ea_header
*ea_split_ea(struct gfs2_ea_header
*ea
)
821 uint32_t ea_size
= GFS2_EA_SIZE(ea
);
822 struct gfs2_ea_header
*new = (struct gfs2_ea_header
*)((char *)ea
+ ea_size
);
823 uint32_t new_size
= GFS2_EA_REC_LEN(ea
) - ea_size
;
824 int last
= ea
->ea_flags
& GFS2_EAFLAG_LAST
;
826 ea
->ea_rec_len
= cpu_to_be32(ea_size
);
827 ea
->ea_flags
^= last
;
829 new->ea_rec_len
= cpu_to_be32(new_size
);
830 new->ea_flags
= last
;
835 static void ea_set_remove_stuffed(struct gfs2_inode
*ip
,
836 struct gfs2_ea_location
*el
)
838 struct gfs2_ea_header
*ea
= el
->el_ea
;
839 struct gfs2_ea_header
*prev
= el
->el_prev
;
842 gfs2_trans_add_bh(ip
->i_gl
, el
->el_bh
);
844 if (!prev
|| !GFS2_EA_IS_STUFFED(ea
)) {
845 ea
->ea_type
= GFS2_EATYPE_UNUSED
;
847 } else if (GFS2_EA2NEXT(prev
) != ea
) {
848 prev
= GFS2_EA2NEXT(prev
);
849 gfs2_assert_withdraw(ip
->i_sbd
, GFS2_EA2NEXT(prev
) == ea
);
852 len
= GFS2_EA_REC_LEN(prev
) + GFS2_EA_REC_LEN(ea
);
853 prev
->ea_rec_len
= cpu_to_be32(len
);
855 if (GFS2_EA_IS_LAST(ea
))
856 prev
->ea_flags
|= GFS2_EAFLAG_LAST
;
862 struct gfs2_ea_request
*es_er
;
863 struct gfs2_ea_location
*es_el
;
865 struct buffer_head
*es_bh
;
866 struct gfs2_ea_header
*es_ea
;
869 static int ea_set_simple_noalloc(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
870 struct gfs2_ea_header
*ea
, struct ea_set
*es
)
872 struct gfs2_ea_request
*er
= es
->es_er
;
873 struct buffer_head
*dibh
;
876 error
= gfs2_trans_begin(ip
->i_sbd
, RES_DINODE
+ 2 * RES_EATTR
, 0);
880 gfs2_trans_add_bh(ip
->i_gl
, bh
);
883 ea
= ea_split_ea(ea
);
885 ea_write(ip
, ea
, er
);
888 ea_set_remove_stuffed(ip
, es
->es_el
);
890 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
894 if (er
->er_flags
& GFS2_ERF_MODE
) {
895 gfs2_assert_withdraw(ip
->i_sbd
,
896 (ip
->i_di
.di_mode
& S_IFMT
) == (er
->er_mode
& S_IFMT
));
897 ip
->i_di
.di_mode
= er
->er_mode
;
899 ip
->i_di
.di_ctime
= get_seconds();
900 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
901 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
904 gfs2_trans_end(ip
->i_sbd
);
909 static int ea_set_simple_alloc(struct gfs2_inode
*ip
,
910 struct gfs2_ea_request
*er
, void *private)
912 struct ea_set
*es
= private;
913 struct gfs2_ea_header
*ea
= es
->es_ea
;
916 gfs2_trans_add_bh(ip
->i_gl
, es
->es_bh
);
919 ea
= ea_split_ea(ea
);
921 error
= ea_write(ip
, ea
, er
);
926 ea_set_remove_stuffed(ip
, es
->es_el
);
931 static int ea_set_simple(struct gfs2_inode
*ip
, struct buffer_head
*bh
,
932 struct gfs2_ea_header
*ea
, struct gfs2_ea_header
*prev
,
935 struct ea_set
*es
= private;
940 stuffed
= ea_calc_size(ip
->i_sbd
, es
->es_er
, &size
);
942 if (ea
->ea_type
== GFS2_EATYPE_UNUSED
) {
943 if (GFS2_EA_REC_LEN(ea
) < size
)
945 if (!GFS2_EA_IS_STUFFED(ea
)) {
946 error
= ea_remove_unstuffed(ip
, bh
, ea
, prev
, 1);
951 } else if (GFS2_EA_REC_LEN(ea
) - GFS2_EA_SIZE(ea
) >= size
)
957 error
= ea_set_simple_noalloc(ip
, bh
, ea
, es
);
965 blks
= 2 + DIV_RU(es
->es_er
->er_data_len
, ip
->i_sbd
->sd_jbsize
);
967 error
= ea_alloc_skeleton(ip
, es
->es_er
, blks
,
968 ea_set_simple_alloc
, es
);
976 static int ea_set_block(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
,
979 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
980 struct buffer_head
*indbh
, *newbh
;
983 int mh_size
= sizeof(struct gfs2_meta_header
);
985 if (ip
->i_di
.di_flags
& GFS2_DIF_EA_INDIRECT
) {
988 error
= gfs2_meta_read(ip
->i_gl
, ip
->i_di
.di_eattr
,
989 DIO_START
| DIO_WAIT
, &indbh
);
993 if (gfs2_metatype_check(sdp
, indbh
, GFS2_METATYPE_IN
)) {
998 eablk
= (uint64_t *)(indbh
->b_data
+ mh_size
);
999 end
= eablk
+ sdp
->sd_inptrs
;
1001 for (; eablk
< end
; eablk
++)
1010 gfs2_trans_add_bh(ip
->i_gl
, indbh
);
1014 blk
= gfs2_alloc_meta(ip
);
1016 indbh
= gfs2_meta_new(ip
->i_gl
, blk
);
1017 gfs2_trans_add_bh(ip
->i_gl
, indbh
);
1018 gfs2_metatype_set(indbh
, GFS2_METATYPE_IN
, GFS2_FORMAT_IN
);
1019 gfs2_buffer_clear_tail(indbh
, mh_size
);
1021 eablk
= (uint64_t *)(indbh
->b_data
+ mh_size
);
1022 *eablk
= cpu_to_be64(ip
->i_di
.di_eattr
);
1023 ip
->i_di
.di_eattr
= blk
;
1024 ip
->i_di
.di_flags
|= GFS2_DIF_EA_INDIRECT
;
1025 ip
->i_di
.di_blocks
++;
1030 error
= ea_alloc_blk(ip
, &newbh
);
1034 *eablk
= cpu_to_be64((uint64_t)newbh
->b_blocknr
);
1035 error
= ea_write(ip
, GFS2_EA_BH2FIRST(newbh
), er
);
1041 ea_set_remove_stuffed(ip
, (struct gfs2_ea_location
*)private);
1049 static int ea_set_i(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
,
1050 struct gfs2_ea_location
*el
)
1053 unsigned int blks
= 2;
1056 memset(&es
, 0, sizeof(struct ea_set
));
1060 error
= ea_foreach(ip
, ea_set_simple
, &es
);
1066 if (!(ip
->i_di
.di_flags
& GFS2_DIF_EA_INDIRECT
))
1068 if (GFS2_EAREQ_SIZE_STUFFED(er
) > ip
->i_sbd
->sd_jbsize
)
1069 blks
+= DIV_RU(er
->er_data_len
, ip
->i_sbd
->sd_jbsize
);
1071 return ea_alloc_skeleton(ip
, er
, blks
, ea_set_block
, el
);
1074 static int ea_set_remove_unstuffed(struct gfs2_inode
*ip
,
1075 struct gfs2_ea_location
*el
)
1077 if (el
->el_prev
&& GFS2_EA2NEXT(el
->el_prev
) != el
->el_ea
) {
1078 el
->el_prev
= GFS2_EA2NEXT(el
->el_prev
);
1079 gfs2_assert_withdraw(ip
->i_sbd
,
1080 GFS2_EA2NEXT(el
->el_prev
) == el
->el_ea
);
1083 return ea_remove_unstuffed(ip
, el
->el_bh
, el
->el_ea
, el
->el_prev
,0);
1086 int gfs2_ea_set_i(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
1088 struct gfs2_ea_location el
;
1091 if (!ip
->i_di
.di_eattr
) {
1092 if (er
->er_flags
& XATTR_REPLACE
)
1094 return ea_init(ip
, er
);
1097 error
= gfs2_ea_find(ip
, er
, &el
);
1102 if (ip
->i_di
.di_flags
& GFS2_DIF_APPENDONLY
) {
1108 if (!(er
->er_flags
& XATTR_CREATE
)) {
1109 int unstuffed
= !GFS2_EA_IS_STUFFED(el
.el_ea
);
1110 error
= ea_set_i(ip
, er
, &el
);
1111 if (!error
&& unstuffed
)
1112 ea_set_remove_unstuffed(ip
, &el
);
1118 if (!(er
->er_flags
& XATTR_REPLACE
))
1119 error
= ea_set_i(ip
, er
, NULL
);
1125 int gfs2_ea_set(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
1127 struct gfs2_holder i_gh
;
1130 if (!er
->er_name_len
||
1131 er
->er_name_len
> GFS2_EA_MAX_NAME_LEN
)
1133 if (!er
->er_data
|| !er
->er_data_len
) {
1135 er
->er_data_len
= 0;
1137 error
= ea_check_size(ip
->i_sbd
, er
);
1141 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_EXCLUSIVE
, 0, &i_gh
);
1145 if (IS_IMMUTABLE(ip
->i_vnode
))
1148 error
= gfs2_ea_ops
[er
->er_type
]->eo_set(ip
, er
);
1150 gfs2_glock_dq_uninit(&i_gh
);
1155 static int ea_remove_stuffed(struct gfs2_inode
*ip
, struct gfs2_ea_location
*el
)
1157 struct gfs2_ea_header
*ea
= el
->el_ea
;
1158 struct gfs2_ea_header
*prev
= el
->el_prev
;
1159 struct buffer_head
*dibh
;
1162 error
= gfs2_trans_begin(ip
->i_sbd
, RES_DINODE
+ RES_EATTR
, 0);
1166 gfs2_trans_add_bh(ip
->i_gl
, el
->el_bh
);
1171 len
= GFS2_EA_REC_LEN(prev
) + GFS2_EA_REC_LEN(ea
);
1172 prev
->ea_rec_len
= cpu_to_be32(len
);
1174 if (GFS2_EA_IS_LAST(ea
))
1175 prev
->ea_flags
|= GFS2_EAFLAG_LAST
;
1177 ea
->ea_type
= GFS2_EATYPE_UNUSED
;
1179 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
1181 ip
->i_di
.di_ctime
= get_seconds();
1182 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
1183 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
1187 gfs2_trans_end(ip
->i_sbd
);
1192 int gfs2_ea_remove_i(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
1194 struct gfs2_ea_location el
;
1197 if (!ip
->i_di
.di_eattr
)
1200 error
= gfs2_ea_find(ip
, er
, &el
);
1206 if (GFS2_EA_IS_STUFFED(el
.el_ea
))
1207 error
= ea_remove_stuffed(ip
, &el
);
1209 error
= ea_remove_unstuffed(ip
, el
.el_bh
, el
.el_ea
, el
.el_prev
,
1218 * gfs2_ea_remove - sets (or creates or replaces) an extended attribute
1219 * @ip: pointer to the inode of the target file
1220 * @er: request information
1225 int gfs2_ea_remove(struct gfs2_inode
*ip
, struct gfs2_ea_request
*er
)
1227 struct gfs2_holder i_gh
;
1230 if (!er
->er_name_len
|| er
->er_name_len
> GFS2_EA_MAX_NAME_LEN
)
1233 error
= gfs2_glock_nq_init(ip
->i_gl
, LM_ST_EXCLUSIVE
, 0, &i_gh
);
1237 if (IS_IMMUTABLE(ip
->i_vnode
) || IS_APPEND(ip
->i_vnode
))
1240 error
= gfs2_ea_ops
[er
->er_type
]->eo_remove(ip
, er
);
1242 gfs2_glock_dq_uninit(&i_gh
);
1247 static int ea_acl_chmod_unstuffed(struct gfs2_inode
*ip
,
1248 struct gfs2_ea_header
*ea
, char *data
)
1250 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
1251 struct buffer_head
**bh
;
1252 unsigned int amount
= GFS2_EA_DATA_LEN(ea
);
1253 unsigned int nptrs
= DIV_RU(amount
, sdp
->sd_jbsize
);
1254 uint64_t *dataptrs
= GFS2_EA2DATAPTRS(ea
);
1258 bh
= kcalloc(nptrs
, sizeof(struct buffer_head
*), GFP_KERNEL
);
1262 error
= gfs2_trans_begin(sdp
, nptrs
+ RES_DINODE
, 0);
1266 for (x
= 0; x
< nptrs
; x
++) {
1267 error
= gfs2_meta_read(ip
->i_gl
, be64_to_cpu(*dataptrs
),
1277 for (x
= 0; x
< nptrs
; x
++) {
1278 error
= gfs2_meta_reread(sdp
, bh
[x
], DIO_WAIT
);
1280 for (; x
< nptrs
; x
++)
1284 if (gfs2_metatype_check(sdp
, bh
[x
], GFS2_METATYPE_ED
)) {
1285 for (; x
< nptrs
; x
++)
1291 gfs2_trans_add_bh(ip
->i_gl
, bh
[x
]);
1293 memcpy(bh
[x
]->b_data
+ sizeof(struct gfs2_meta_header
),
1295 (sdp
->sd_jbsize
> amount
) ? amount
: sdp
->sd_jbsize
);
1297 amount
-= sdp
->sd_jbsize
;
1298 data
+= sdp
->sd_jbsize
;
1309 gfs2_trans_end(sdp
);
1315 int gfs2_ea_acl_chmod(struct gfs2_inode
*ip
, struct gfs2_ea_location
*el
,
1316 struct iattr
*attr
, char *data
)
1318 struct buffer_head
*dibh
;
1321 if (GFS2_EA_IS_STUFFED(el
->el_ea
)) {
1322 error
= gfs2_trans_begin(ip
->i_sbd
, RES_DINODE
+ RES_EATTR
, 0);
1326 gfs2_trans_add_bh(ip
->i_gl
, el
->el_bh
);
1327 memcpy(GFS2_EA2DATA(el
->el_ea
),
1329 GFS2_EA_DATA_LEN(el
->el_ea
));
1331 error
= ea_acl_chmod_unstuffed(ip
, el
->el_ea
, data
);
1336 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
1338 error
= inode_setattr(ip
->i_vnode
, attr
);
1339 gfs2_assert_warn(ip
->i_sbd
, !error
);
1340 gfs2_inode_attr_out(ip
);
1341 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
1342 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
1346 gfs2_trans_end(ip
->i_sbd
);
1351 static int ea_dealloc_indirect(struct gfs2_inode
*ip
)
1353 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
1354 struct gfs2_rgrp_list rlist
;
1355 struct buffer_head
*indbh
, *dibh
;
1356 uint64_t *eablk
, *end
;
1357 unsigned int rg_blocks
= 0;
1358 uint64_t bstart
= 0;
1359 unsigned int blen
= 0;
1360 unsigned int blks
= 0;
1364 memset(&rlist
, 0, sizeof(struct gfs2_rgrp_list
));
1366 error
= gfs2_meta_read(ip
->i_gl
, ip
->i_di
.di_eattr
,
1367 DIO_START
| DIO_WAIT
, &indbh
);
1371 if (gfs2_metatype_check(sdp
, indbh
, GFS2_METATYPE_IN
)) {
1376 eablk
= (uint64_t *)(indbh
->b_data
+ sizeof(struct gfs2_meta_header
));
1377 end
= eablk
+ sdp
->sd_inptrs
;
1379 for (; eablk
< end
; eablk
++) {
1384 bn
= be64_to_cpu(*eablk
);
1386 if (bstart
+ blen
== bn
)
1390 gfs2_rlist_add(sdp
, &rlist
, bstart
);
1397 gfs2_rlist_add(sdp
, &rlist
, bstart
);
1401 gfs2_rlist_alloc(&rlist
, LM_ST_EXCLUSIVE
, 0);
1403 for (x
= 0; x
< rlist
.rl_rgrps
; x
++) {
1404 struct gfs2_rgrpd
*rgd
;
1405 rgd
= get_gl2rgd(rlist
.rl_ghs
[x
].gh_gl
);
1406 rg_blocks
+= rgd
->rd_ri
.ri_length
;
1409 error
= gfs2_glock_nq_m(rlist
.rl_rgrps
, rlist
.rl_ghs
);
1411 goto out_rlist_free
;
1413 error
= gfs2_trans_begin(sdp
, rg_blocks
+ RES_DINODE
+
1414 RES_INDIRECT
+ RES_STATFS
+
1419 gfs2_trans_add_bh(ip
->i_gl
, indbh
);
1421 eablk
= (uint64_t *)(indbh
->b_data
+ sizeof(struct gfs2_meta_header
));
1425 for (; eablk
< end
; eablk
++) {
1430 bn
= be64_to_cpu(*eablk
);
1432 if (bstart
+ blen
== bn
)
1436 gfs2_free_meta(ip
, bstart
, blen
);
1442 if (!ip
->i_di
.di_blocks
)
1443 gfs2_consist_inode(ip
);
1444 ip
->i_di
.di_blocks
--;
1447 gfs2_free_meta(ip
, bstart
, blen
);
1449 ip
->i_di
.di_flags
&= ~GFS2_DIF_EA_INDIRECT
;
1451 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
1453 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
1454 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
1458 gfs2_trans_end(sdp
);
1461 gfs2_glock_dq_m(rlist
.rl_rgrps
, rlist
.rl_ghs
);
1464 gfs2_rlist_free(&rlist
);
1472 static int ea_dealloc_block(struct gfs2_inode
*ip
)
1474 struct gfs2_sbd
*sdp
= ip
->i_sbd
;
1475 struct gfs2_alloc
*al
= &ip
->i_alloc
;
1476 struct gfs2_rgrpd
*rgd
;
1477 struct buffer_head
*dibh
;
1480 rgd
= gfs2_blk2rgrpd(sdp
, ip
->i_di
.di_eattr
);
1482 gfs2_consist_inode(ip
);
1486 error
= gfs2_glock_nq_init(rgd
->rd_gl
, LM_ST_EXCLUSIVE
, 0,
1491 error
= gfs2_trans_begin(sdp
, RES_RG_BIT
+ RES_DINODE
+
1492 RES_STATFS
+ RES_QUOTA
, 1);
1496 gfs2_free_meta(ip
, ip
->i_di
.di_eattr
, 1);
1498 ip
->i_di
.di_eattr
= 0;
1499 if (!ip
->i_di
.di_blocks
)
1500 gfs2_consist_inode(ip
);
1501 ip
->i_di
.di_blocks
--;
1503 error
= gfs2_meta_inode_buffer(ip
, &dibh
);
1505 gfs2_trans_add_bh(ip
->i_gl
, dibh
);
1506 gfs2_dinode_out(&ip
->i_di
, dibh
->b_data
);
1510 gfs2_trans_end(sdp
);
1513 gfs2_glock_dq_uninit(&al
->al_rgd_gh
);
1519 * gfs2_ea_dealloc - deallocate the extended attribute fork
1525 int gfs2_ea_dealloc(struct gfs2_inode
*ip
)
1527 struct gfs2_alloc
*al
;
1530 al
= gfs2_alloc_get(ip
);
1532 error
= gfs2_quota_hold(ip
, NO_QUOTA_CHANGE
, NO_QUOTA_CHANGE
);
1536 error
= gfs2_rindex_hold(ip
->i_sbd
, &al
->al_ri_gh
);
1540 error
= ea_foreach(ip
, ea_dealloc_unstuffed
, NULL
);
1544 if (ip
->i_di
.di_flags
& GFS2_DIF_EA_INDIRECT
) {
1545 error
= ea_dealloc_indirect(ip
);
1550 error
= ea_dealloc_block(ip
);
1553 gfs2_glock_dq_uninit(&al
->al_ri_gh
);
1556 gfs2_quota_unhold(ip
);
1565 * gfs2_get_eattr_meta - return all the eattr blocks of a file
1566 * @dip: the directory
1567 * @ub: the structure representing the user buffer to copy to
1572 int gfs2_get_eattr_meta(struct gfs2_inode
*ip
, struct gfs2_user_buffer
*ub
)
1574 struct buffer_head
*bh
;
1577 error
= gfs2_meta_read(ip
->i_gl
, ip
->i_di
.di_eattr
,
1578 DIO_START
| DIO_WAIT
, &bh
);
1582 gfs2_add_bh_to_ub(ub
, bh
);
1584 if (ip
->i_di
.di_flags
& GFS2_DIF_EA_INDIRECT
) {
1585 struct buffer_head
*eabh
;
1586 uint64_t *eablk
, *end
;
1588 if (gfs2_metatype_check(ip
->i_sbd
, bh
, GFS2_METATYPE_IN
)) {
1593 eablk
= (uint64_t *)(bh
->b_data
+
1594 sizeof(struct gfs2_meta_header
));
1595 end
= eablk
+ ip
->i_sbd
->sd_inptrs
;
1597 for (; eablk
< end
; eablk
++) {
1602 bn
= be64_to_cpu(*eablk
);
1604 error
= gfs2_meta_read(ip
->i_gl
, bn
,
1605 DIO_START
| DIO_WAIT
, &eabh
);
1608 gfs2_add_bh_to_ub(ub
, eabh
);