4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
23 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2013, 2017 by Delphix. All rights reserved.
25 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
28 #include <sys/zfs_context.h>
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <sys/sysmacros.h>
33 #include <sys/dmu_impl.h>
34 #include <sys/dmu_objset.h>
35 #include <sys/dmu_tx.h>
37 #include <sys/dnode.h>
40 #include <sys/sunddi.h>
41 #include <sys/sa_impl.h>
42 #include <sys/dnode.h>
43 #include <sys/errno.h>
44 #include <sys/zfs_context.h>
47 #include <sys/zfs_znode.h>
51 * ZFS System attributes:
53 * A generic mechanism to allow for arbitrary attributes
54 * to be stored in a dnode. The data will be stored in the bonus buffer of
55 * the dnode and if necessary a special "spill" block will be used to handle
56 * overflow situations. The spill block will be sized to fit the data
57 * from 512 - 128K. When a spill block is used the BP (blkptr_t) for the
58 * spill block is stored at the end of the current bonus buffer. Any
59 * attributes that would be in the way of the blkptr_t will be relocated
60 * into the spill block.
62 * Attribute registration:
64 * Stored persistently on a per dataset basis
65 * a mapping between attribute "string" names and their actual attribute
66 * numeric values, length, and byteswap function. The names are only used
67 * during registration. All attributes are known by their unique attribute
68 * id value. If an attribute can have a variable size then the value
69 * 0 will be used to indicate this.
73 * Attribute layouts are a way to compactly store multiple attributes, but
74 * without taking the overhead associated with managing each attribute
75 * individually. Since you will typically have the same set of attributes
76 * stored in the same order a single table will be used to represent that
77 * layout. The ZPL for example will usually have only about 10 different
78 * layouts (regular files, device files, symlinks,
79 * regular files + scanstamp, files/dir with extended attributes, and then
80 * you have the possibility of all of those minus ACL, because it would
81 * be kicked out into the spill block)
83 * Layouts are simply an array of the attributes and their
84 * ordering i.e. [0, 1, 4, 5, 2]
86 * Each distinct layout is given a unique layout number and that is what's
87 * stored in the header at the beginning of the SA data buffer.
89 * A layout only covers a single dbuf (bonus or spill). If a set of
90 * attributes is split up between the bonus buffer and a spill buffer then
91 * two different layouts will be used. This allows us to byteswap the
92 * spill without looking at the bonus buffer and keeps the on disk format of
93 * the bonus and spill buffer the same.
95 * Adding a single attribute will cause the entire set of attributes to
96 * be rewritten and could result in a new layout number being constructed
97 * as part of the rewrite if no such layout exists for the new set of
98 * attributes. The new attribute will be appended to the end of the already
99 * existing attributes.
101 * Both the attribute registration and attribute layout information are
102 * stored in normal ZAP attributes. There should be a small number of
103 * known layouts and the set of attributes is assumed to typically be quite
106 * The registered attributes and layout "table" information is maintained
107 * in core and a special "sa_os_t" is attached to the objset_t.
109 * A special interface is provided to allow for quickly applying
110 * a large set of attributes at once. sa_replace_all_by_template() is
111 * used to set an array of attributes. This is used by the ZPL when
112 * creating a brand new file. The template that is passed into the function
113 * specifies the attribute, size for variable length attributes, location of
114 * data and special "data locator" function if the data isn't in a contiguous
117 * Byteswap implications:
119 * Since the SA attributes are not entirely self describing we can't do
120 * the normal byteswap processing. The special ZAP layout attribute and
121 * attribute registration attributes define the byteswap function and the
122 * size of the attributes, unless it is variable sized.
123 * The normal ZFS byteswapping infrastructure assumes you don't need
124 * to read any objects in order to do the necessary byteswapping. Whereas
125 * SA attributes can only be properly byteswapped if the dataset is opened
126 * and the layout/attribute ZAP attributes are available. Because of this
127 * the SA attributes will be byteswapped when they are first accessed by
128 * the SA code that will read the SA data.
131 typedef void (sa_iterfunc_t
)(void *hdr
, void *addr
, sa_attr_type_t
,
132 uint16_t length
, int length_idx
, boolean_t
, void *userp
);
134 static int sa_build_index(sa_handle_t
*hdl
, sa_buf_type_t buftype
);
135 static void sa_idx_tab_hold(objset_t
*os
, sa_idx_tab_t
*idx_tab
);
136 static sa_idx_tab_t
*sa_find_idx_tab(objset_t
*os
, dmu_object_type_t bonustype
,
138 static void sa_idx_tab_rele(objset_t
*os
, void *arg
);
139 static void sa_copy_data(sa_data_locator_t
*func
, void *start
, void *target
,
141 static int sa_modify_attrs(sa_handle_t
*hdl
, sa_attr_type_t newattr
,
142 sa_data_op_t action
, sa_data_locator_t
*locator
, void *datastart
,
143 uint16_t buflen
, dmu_tx_t
*tx
);
145 arc_byteswap_func_t sa_bswap_table
[] = {
146 byteswap_uint64_array
,
147 byteswap_uint32_array
,
148 byteswap_uint16_array
,
149 byteswap_uint8_array
,
153 #define SA_COPY_DATA(f, s, t, l) \
157 *(uint64_t *)t = *(uint64_t *)s; \
158 } else if (l == 16) { \
159 *(uint64_t *)t = *(uint64_t *)s; \
160 *(uint64_t *)((uintptr_t)t + 8) = \
161 *(uint64_t *)((uintptr_t)s + 8); \
166 sa_copy_data(f, s, t, l); \
170 * This table is fixed and cannot be changed. Its purpose is to
171 * allow the SA code to work with both old/new ZPL file systems.
172 * It contains the list of legacy attributes. These attributes aren't
173 * stored in the "attribute" registry zap objects, since older ZPL file systems
174 * won't have the registry. Only objsets of type ZFS_TYPE_FILESYSTEM will
175 * use this static table.
177 sa_attr_reg_t sa_legacy_attrs
[] = {
178 {"ZPL_ATIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY
, 0},
179 {"ZPL_MTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY
, 1},
180 {"ZPL_CTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY
, 2},
181 {"ZPL_CRTIME", sizeof (uint64_t) * 2, SA_UINT64_ARRAY
, 3},
182 {"ZPL_GEN", sizeof (uint64_t), SA_UINT64_ARRAY
, 4},
183 {"ZPL_MODE", sizeof (uint64_t), SA_UINT64_ARRAY
, 5},
184 {"ZPL_SIZE", sizeof (uint64_t), SA_UINT64_ARRAY
, 6},
185 {"ZPL_PARENT", sizeof (uint64_t), SA_UINT64_ARRAY
, 7},
186 {"ZPL_LINKS", sizeof (uint64_t), SA_UINT64_ARRAY
, 8},
187 {"ZPL_XATTR", sizeof (uint64_t), SA_UINT64_ARRAY
, 9},
188 {"ZPL_RDEV", sizeof (uint64_t), SA_UINT64_ARRAY
, 10},
189 {"ZPL_FLAGS", sizeof (uint64_t), SA_UINT64_ARRAY
, 11},
190 {"ZPL_UID", sizeof (uint64_t), SA_UINT64_ARRAY
, 12},
191 {"ZPL_GID", sizeof (uint64_t), SA_UINT64_ARRAY
, 13},
192 {"ZPL_PAD", sizeof (uint64_t) * 4, SA_UINT64_ARRAY
, 14},
193 {"ZPL_ZNODE_ACL", 88, SA_UINT8_ARRAY
, 15},
197 * This is only used for objects of type DMU_OT_ZNODE
199 sa_attr_type_t sa_legacy_zpl_layout
[] = {
200 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
204 * Special dummy layout used for buffers with no attributes.
206 sa_attr_type_t sa_dummy_zpl_layout
[] = { 0 };
208 static int sa_legacy_attr_count
= ARRAY_SIZE(sa_legacy_attrs
);
209 static kmem_cache_t
*sa_cache
= NULL
;
213 sa_cache_constructor(void *buf
, void *unused
, int kmflag
)
215 sa_handle_t
*hdl
= buf
;
217 mutex_init(&hdl
->sa_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
223 sa_cache_destructor(void *buf
, void *unused
)
225 sa_handle_t
*hdl
= buf
;
226 mutex_destroy(&hdl
->sa_lock
);
232 sa_cache
= kmem_cache_create("sa_cache",
233 sizeof (sa_handle_t
), 0, sa_cache_constructor
,
234 sa_cache_destructor
, NULL
, NULL
, NULL
, 0);
241 kmem_cache_destroy(sa_cache
);
245 layout_num_compare(const void *arg1
, const void *arg2
)
247 const sa_lot_t
*node1
= (const sa_lot_t
*)arg1
;
248 const sa_lot_t
*node2
= (const sa_lot_t
*)arg2
;
250 return (AVL_CMP(node1
->lot_num
, node2
->lot_num
));
254 layout_hash_compare(const void *arg1
, const void *arg2
)
256 const sa_lot_t
*node1
= (const sa_lot_t
*)arg1
;
257 const sa_lot_t
*node2
= (const sa_lot_t
*)arg2
;
259 int cmp
= AVL_CMP(node1
->lot_hash
, node2
->lot_hash
);
263 return (AVL_CMP(node1
->lot_instance
, node2
->lot_instance
));
267 sa_layout_equal(sa_lot_t
*tbf
, sa_attr_type_t
*attrs
, int count
)
271 if (count
!= tbf
->lot_attr_count
)
274 for (i
= 0; i
!= count
; i
++) {
275 if (attrs
[i
] != tbf
->lot_attrs
[i
])
281 #define SA_ATTR_HASH(attr) (zfs_crc64_table[(-1ULL ^ attr) & 0xFF])
284 sa_layout_info_hash(sa_attr_type_t
*attrs
, int attr_count
)
287 uint64_t crc
= -1ULL;
289 for (i
= 0; i
!= attr_count
; i
++)
290 crc
^= SA_ATTR_HASH(attrs
[i
]);
296 sa_get_spill(sa_handle_t
*hdl
)
299 if (hdl
->sa_spill
== NULL
) {
300 if ((rc
= dmu_spill_hold_existing(hdl
->sa_bonus
, NULL
,
301 &hdl
->sa_spill
)) == 0)
302 VERIFY(0 == sa_build_index(hdl
, SA_SPILL
));
311 * Main attribute lookup/update function
312 * returns 0 for success or non zero for failures
314 * Operates on bulk array, first failure will abort further processing
317 sa_attr_op(sa_handle_t
*hdl
, sa_bulk_attr_t
*bulk
, int count
,
318 sa_data_op_t data_op
, dmu_tx_t
*tx
)
320 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
323 sa_buf_type_t buftypes
;
328 for (i
= 0; i
!= count
; i
++) {
329 ASSERT(bulk
[i
].sa_attr
<= hdl
->sa_os
->os_sa
->sa_num_attrs
);
331 bulk
[i
].sa_addr
= NULL
;
332 /* First check the bonus buffer */
334 if (hdl
->sa_bonus_tab
&& TOC_ATTR_PRESENT(
335 hdl
->sa_bonus_tab
->sa_idx_tab
[bulk
[i
].sa_attr
])) {
336 SA_ATTR_INFO(sa
, hdl
->sa_bonus_tab
,
337 SA_GET_HDR(hdl
, SA_BONUS
),
338 bulk
[i
].sa_attr
, bulk
[i
], SA_BONUS
, hdl
);
339 if (tx
&& !(buftypes
& SA_BONUS
)) {
340 dmu_buf_will_dirty(hdl
->sa_bonus
, tx
);
341 buftypes
|= SA_BONUS
;
344 if (bulk
[i
].sa_addr
== NULL
&&
345 ((error
= sa_get_spill(hdl
)) == 0)) {
346 if (TOC_ATTR_PRESENT(
347 hdl
->sa_spill_tab
->sa_idx_tab
[bulk
[i
].sa_attr
])) {
348 SA_ATTR_INFO(sa
, hdl
->sa_spill_tab
,
349 SA_GET_HDR(hdl
, SA_SPILL
),
350 bulk
[i
].sa_attr
, bulk
[i
], SA_SPILL
, hdl
);
351 if (tx
&& !(buftypes
& SA_SPILL
) &&
352 bulk
[i
].sa_size
== bulk
[i
].sa_length
) {
353 dmu_buf_will_dirty(hdl
->sa_spill
, tx
);
354 buftypes
|= SA_SPILL
;
358 if (error
&& error
!= ENOENT
) {
359 return ((error
== ECKSUM
) ? EIO
: error
);
364 if (bulk
[i
].sa_addr
== NULL
)
365 return (SET_ERROR(ENOENT
));
366 if (bulk
[i
].sa_data
) {
367 SA_COPY_DATA(bulk
[i
].sa_data_func
,
368 bulk
[i
].sa_addr
, bulk
[i
].sa_data
,
374 /* existing rewrite of attr */
375 if (bulk
[i
].sa_addr
&&
376 bulk
[i
].sa_size
== bulk
[i
].sa_length
) {
377 SA_COPY_DATA(bulk
[i
].sa_data_func
,
378 bulk
[i
].sa_data
, bulk
[i
].sa_addr
,
381 } else if (bulk
[i
].sa_addr
) { /* attr size change */
382 error
= sa_modify_attrs(hdl
, bulk
[i
].sa_attr
,
383 SA_REPLACE
, bulk
[i
].sa_data_func
,
384 bulk
[i
].sa_data
, bulk
[i
].sa_length
, tx
);
385 } else { /* adding new attribute */
386 error
= sa_modify_attrs(hdl
, bulk
[i
].sa_attr
,
387 SA_ADD
, bulk
[i
].sa_data_func
,
388 bulk
[i
].sa_data
, bulk
[i
].sa_length
, tx
);
401 sa_add_layout_entry(objset_t
*os
, sa_attr_type_t
*attrs
, int attr_count
,
402 uint64_t lot_num
, uint64_t hash
, boolean_t zapadd
, dmu_tx_t
*tx
)
404 sa_os_t
*sa
= os
->os_sa
;
405 sa_lot_t
*tb
, *findtb
;
409 ASSERT(MUTEX_HELD(&sa
->sa_lock
));
410 tb
= kmem_zalloc(sizeof (sa_lot_t
), KM_SLEEP
);
411 tb
->lot_attr_count
= attr_count
;
412 tb
->lot_attrs
= kmem_alloc(sizeof (sa_attr_type_t
) * attr_count
,
414 bcopy(attrs
, tb
->lot_attrs
, sizeof (sa_attr_type_t
) * attr_count
);
415 tb
->lot_num
= lot_num
;
417 tb
->lot_instance
= 0;
422 if (sa
->sa_layout_attr_obj
== 0) {
423 sa
->sa_layout_attr_obj
= zap_create_link(os
,
424 DMU_OT_SA_ATTR_LAYOUTS
,
425 sa
->sa_master_obj
, SA_LAYOUTS
, tx
);
428 (void) snprintf(attr_name
, sizeof (attr_name
),
430 VERIFY(0 == zap_update(os
, os
->os_sa
->sa_layout_attr_obj
,
431 attr_name
, 2, attr_count
, attrs
, tx
));
434 list_create(&tb
->lot_idx_tab
, sizeof (sa_idx_tab_t
),
435 offsetof(sa_idx_tab_t
, sa_next
));
437 for (i
= 0; i
!= attr_count
; i
++) {
438 if (sa
->sa_attr_table
[tb
->lot_attrs
[i
]].sa_length
== 0)
442 avl_add(&sa
->sa_layout_num_tree
, tb
);
444 /* verify we don't have a hash collision */
445 if ((findtb
= avl_find(&sa
->sa_layout_hash_tree
, tb
, &loc
)) != NULL
) {
446 for (; findtb
&& findtb
->lot_hash
== hash
;
447 findtb
= AVL_NEXT(&sa
->sa_layout_hash_tree
, findtb
)) {
448 if (findtb
->lot_instance
!= tb
->lot_instance
)
453 avl_add(&sa
->sa_layout_hash_tree
, tb
);
458 sa_find_layout(objset_t
*os
, uint64_t hash
, sa_attr_type_t
*attrs
,
459 int count
, dmu_tx_t
*tx
, sa_lot_t
**lot
)
461 sa_lot_t
*tb
, tbsearch
;
463 sa_os_t
*sa
= os
->os_sa
;
464 boolean_t found
= B_FALSE
;
466 mutex_enter(&sa
->sa_lock
);
467 tbsearch
.lot_hash
= hash
;
468 tbsearch
.lot_instance
= 0;
469 tb
= avl_find(&sa
->sa_layout_hash_tree
, &tbsearch
, &loc
);
471 for (; tb
&& tb
->lot_hash
== hash
;
472 tb
= AVL_NEXT(&sa
->sa_layout_hash_tree
, tb
)) {
473 if (sa_layout_equal(tb
, attrs
, count
) == 0) {
480 tb
= sa_add_layout_entry(os
, attrs
, count
,
481 avl_numnodes(&sa
->sa_layout_num_tree
), hash
, B_TRUE
, tx
);
483 mutex_exit(&sa
->sa_lock
);
488 sa_resize_spill(sa_handle_t
*hdl
, uint32_t size
, dmu_tx_t
*tx
)
494 blocksize
= SPA_MINBLOCKSIZE
;
495 } else if (size
> SPA_OLD_MAXBLOCKSIZE
) {
497 return (SET_ERROR(EFBIG
));
499 blocksize
= P2ROUNDUP_TYPED(size
, SPA_MINBLOCKSIZE
, uint32_t);
502 error
= dbuf_spill_set_blksz(hdl
->sa_spill
, blocksize
, tx
);
508 sa_copy_data(sa_data_locator_t
*func
, void *datastart
, void *target
, int buflen
)
511 bcopy(datastart
, target
, buflen
);
516 void *saptr
= target
;
521 while (bytes
< buflen
) {
522 func(&dataptr
, &length
, buflen
, start
, datastart
);
523 bcopy(dataptr
, saptr
, length
);
524 saptr
= (void *)((caddr_t
)saptr
+ length
);
532 * Determine several different values pertaining to system attribute
535 * Return the size of the sa_hdr_phys_t header for the buffer. Each
536 * variable length attribute except the first contributes two bytes to
537 * the header size, which is then rounded up to an 8-byte boundary.
539 * The following output parameters are also computed.
541 * index - The index of the first attribute in attr_desc that will
542 * spill over. Only valid if will_spill is set.
544 * total - The total number of bytes of all system attributes described
547 * will_spill - Set when spilling is necessary. It is only set when
548 * the buftype is SA_BONUS.
551 sa_find_sizes(sa_os_t
*sa
, sa_bulk_attr_t
*attr_desc
, int attr_count
,
552 dmu_buf_t
*db
, sa_buf_type_t buftype
, int full_space
, int *index
,
553 int *total
, boolean_t
*will_spill
)
555 int var_size_count
= 0;
560 if (buftype
== SA_BONUS
&& sa
->sa_force_spill
) {
563 *will_spill
= B_TRUE
;
569 *will_spill
= B_FALSE
;
572 hdrsize
= (SA_BONUSTYPE_FROM_DB(db
) == DMU_OT_ZNODE
) ? 0 :
573 sizeof (sa_hdr_phys_t
);
575 ASSERT(IS_P2ALIGNED(full_space
, 8));
577 for (i
= 0; i
!= attr_count
; i
++) {
578 boolean_t is_var_sz
, might_spill_here
;
581 *total
= P2ROUNDUP(*total
, 8);
582 *total
+= attr_desc
[i
].sa_length
;
586 is_var_sz
= (SA_REGISTERED_LEN(sa
, attr_desc
[i
].sa_attr
) == 0);
591 * Calculate what the SA header size would be if this
592 * attribute doesn't spill.
594 tmp_hdrsize
= hdrsize
+ ((is_var_sz
&& var_size_count
> 1) ?
595 sizeof (uint16_t) : 0);
598 * Check whether this attribute spans into the space
599 * that would be used by the spill block pointer should
600 * a spill block be needed.
603 buftype
== SA_BONUS
&& *index
== -1 &&
604 (*total
+ P2ROUNDUP(tmp_hdrsize
, 8)) >
605 (full_space
- sizeof (blkptr_t
));
607 if (is_var_sz
&& var_size_count
> 1) {
608 if (buftype
== SA_SPILL
||
609 tmp_hdrsize
+ *total
< full_space
) {
611 * Record the extra header size in case this
612 * increase needs to be reversed due to
615 hdrsize
= tmp_hdrsize
;
616 if (*index
!= -1 || might_spill_here
)
617 extra_hdrsize
+= sizeof (uint16_t);
619 ASSERT(buftype
== SA_BONUS
);
622 *will_spill
= B_TRUE
;
628 * Store index of where spill *could* occur. Then
629 * continue to count the remaining attribute sizes. The
630 * sum is used later for sizing bonus and spill buffer.
632 if (might_spill_here
)
635 if ((*total
+ P2ROUNDUP(hdrsize
, 8)) > full_space
&&
637 *will_spill
= B_TRUE
;
641 hdrsize
-= extra_hdrsize
;
643 hdrsize
= P2ROUNDUP(hdrsize
, 8);
647 #define BUF_SPACE_NEEDED(total, header) (total + header)
650 * Find layout that corresponds to ordering of attributes
651 * If not found a new layout number is created and added to
652 * persistent layout tables.
655 sa_build_layouts(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
, int attr_count
,
658 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
660 sa_buf_type_t buftype
;
661 sa_hdr_phys_t
*sahdr
;
663 sa_attr_type_t
*attrs
, *attrs_start
;
668 int spillhdrsize
= 0;
670 dmu_object_type_t bonustype
;
677 dmu_buf_will_dirty(hdl
->sa_bonus
, tx
);
678 bonustype
= SA_BONUSTYPE_FROM_DB(hdl
->sa_bonus
);
679 dmu_object_dnsize_from_db(hdl
->sa_bonus
, &dnodesize
);
680 bonuslen
= DN_BONUS_SIZE(dnodesize
);
682 /* first determine bonus header size and sum of all attributes */
683 hdrsize
= sa_find_sizes(sa
, attr_desc
, attr_count
, hdl
->sa_bonus
,
684 SA_BONUS
, bonuslen
, &spill_idx
, &used
, &spilling
);
686 if (used
> SPA_OLD_MAXBLOCKSIZE
)
687 return (SET_ERROR(EFBIG
));
689 VERIFY0(dmu_set_bonus(hdl
->sa_bonus
, spilling
?
690 MIN(bonuslen
- sizeof (blkptr_t
), used
+ hdrsize
) :
691 used
+ hdrsize
, tx
));
693 ASSERT((bonustype
== DMU_OT_ZNODE
&& spilling
== 0) ||
694 bonustype
== DMU_OT_SA
);
696 /* setup and size spill buffer when needed */
700 if (hdl
->sa_spill
== NULL
) {
701 VERIFY(dmu_spill_hold_by_bonus(hdl
->sa_bonus
, 0, NULL
,
702 &hdl
->sa_spill
) == 0);
704 dmu_buf_will_dirty(hdl
->sa_spill
, tx
);
706 spillhdrsize
= sa_find_sizes(sa
, &attr_desc
[spill_idx
],
707 attr_count
- spill_idx
, hdl
->sa_spill
, SA_SPILL
,
708 hdl
->sa_spill
->db_size
, &i
, &spill_used
, &dummy
);
710 if (spill_used
> SPA_OLD_MAXBLOCKSIZE
)
711 return (SET_ERROR(EFBIG
));
713 if (BUF_SPACE_NEEDED(spill_used
, spillhdrsize
) >
714 hdl
->sa_spill
->db_size
)
715 VERIFY(0 == sa_resize_spill(hdl
,
716 BUF_SPACE_NEEDED(spill_used
, spillhdrsize
), tx
));
719 /* setup starting pointers to lay down data */
720 data_start
= (void *)((uintptr_t)hdl
->sa_bonus
->db_data
+ hdrsize
);
721 sahdr
= (sa_hdr_phys_t
*)hdl
->sa_bonus
->db_data
;
724 attrs_start
= attrs
= kmem_alloc(sizeof (sa_attr_type_t
) * attr_count
,
728 for (i
= 0, len_idx
= 0, hash
= -1ULL; i
!= attr_count
; i
++) {
731 ASSERT(IS_P2ALIGNED(data_start
, 8));
732 attrs
[i
] = attr_desc
[i
].sa_attr
;
733 length
= SA_REGISTERED_LEN(sa
, attrs
[i
]);
735 length
= attr_desc
[i
].sa_length
;
737 if (spilling
&& i
== spill_idx
) { /* switch to spill buffer */
738 VERIFY(bonustype
== DMU_OT_SA
);
739 if (buftype
== SA_BONUS
&& !sa
->sa_force_spill
) {
740 sa_find_layout(hdl
->sa_os
, hash
, attrs_start
,
741 lot_count
, tx
, &lot
);
742 SA_SET_HDR(sahdr
, lot
->lot_num
, hdrsize
);
749 sahdr
= (sa_hdr_phys_t
*)hdl
->sa_spill
->db_data
;
750 sahdr
->sa_magic
= SA_MAGIC
;
751 data_start
= (void *)((uintptr_t)sahdr
+
753 attrs_start
= &attrs
[i
];
756 hash
^= SA_ATTR_HASH(attrs
[i
]);
757 attr_desc
[i
].sa_addr
= data_start
;
758 attr_desc
[i
].sa_size
= length
;
759 SA_COPY_DATA(attr_desc
[i
].sa_data_func
, attr_desc
[i
].sa_data
,
761 if (sa
->sa_attr_table
[attrs
[i
]].sa_length
== 0) {
762 sahdr
->sa_lengths
[len_idx
++] = length
;
764 data_start
= (void *)P2ROUNDUP(((uintptr_t)data_start
+
769 sa_find_layout(hdl
->sa_os
, hash
, attrs_start
, lot_count
, tx
, &lot
);
772 * Verify that old znodes always have layout number 0.
773 * Must be DMU_OT_SA for arbitrary layouts
775 VERIFY((bonustype
== DMU_OT_ZNODE
&& lot
->lot_num
== 0) ||
776 (bonustype
== DMU_OT_SA
&& lot
->lot_num
> 1));
778 if (bonustype
== DMU_OT_SA
) {
779 SA_SET_HDR(sahdr
, lot
->lot_num
,
780 buftype
== SA_BONUS
? hdrsize
: spillhdrsize
);
783 kmem_free(attrs
, sizeof (sa_attr_type_t
) * attr_count
);
784 if (hdl
->sa_bonus_tab
) {
785 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_bonus_tab
);
786 hdl
->sa_bonus_tab
= NULL
;
788 if (!sa
->sa_force_spill
)
789 VERIFY(0 == sa_build_index(hdl
, SA_BONUS
));
791 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
794 * remove spill block that is no longer needed.
796 dmu_buf_rele(hdl
->sa_spill
, NULL
);
797 hdl
->sa_spill
= NULL
;
798 hdl
->sa_spill_tab
= NULL
;
799 VERIFY(0 == dmu_rm_spill(hdl
->sa_os
,
800 sa_handle_object(hdl
), tx
));
802 VERIFY(0 == sa_build_index(hdl
, SA_SPILL
));
810 sa_free_attr_table(sa_os_t
*sa
)
814 if (sa
->sa_attr_table
== NULL
)
817 for (i
= 0; i
!= sa
->sa_num_attrs
; i
++) {
818 if (sa
->sa_attr_table
[i
].sa_name
)
819 kmem_free(sa
->sa_attr_table
[i
].sa_name
,
820 strlen(sa
->sa_attr_table
[i
].sa_name
) + 1);
823 kmem_free(sa
->sa_attr_table
,
824 sizeof (sa_attr_table_t
) * sa
->sa_num_attrs
);
826 sa
->sa_attr_table
= NULL
;
830 sa_attr_table_setup(objset_t
*os
, sa_attr_reg_t
*reg_attrs
, int count
)
832 sa_os_t
*sa
= os
->os_sa
;
833 uint64_t sa_attr_count
= 0;
834 uint64_t sa_reg_count
= 0;
840 int registered_count
= 0;
842 dmu_objset_type_t ostype
= dmu_objset_type(os
);
845 kmem_zalloc(count
* sizeof (sa_attr_type_t
), KM_SLEEP
);
846 sa
->sa_user_table_sz
= count
* sizeof (sa_attr_type_t
);
848 if (sa
->sa_reg_attr_obj
!= 0) {
849 error
= zap_count(os
, sa
->sa_reg_attr_obj
,
853 * Make sure we retrieved a count and that it isn't zero
855 if (error
|| (error
== 0 && sa_attr_count
== 0)) {
857 error
= SET_ERROR(EINVAL
);
860 sa_reg_count
= sa_attr_count
;
863 if (ostype
== DMU_OST_ZFS
&& sa_attr_count
== 0)
864 sa_attr_count
+= sa_legacy_attr_count
;
866 /* Allocate attribute numbers for attributes that aren't registered */
867 for (i
= 0; i
!= count
; i
++) {
868 boolean_t found
= B_FALSE
;
871 if (ostype
== DMU_OST_ZFS
) {
872 for (j
= 0; j
!= sa_legacy_attr_count
; j
++) {
873 if (strcmp(reg_attrs
[i
].sa_name
,
874 sa_legacy_attrs
[j
].sa_name
) == 0) {
875 sa
->sa_user_table
[i
] =
876 sa_legacy_attrs
[j
].sa_attr
;
884 if (sa
->sa_reg_attr_obj
)
885 error
= zap_lookup(os
, sa
->sa_reg_attr_obj
,
886 reg_attrs
[i
].sa_name
, 8, 1, &attr_value
);
888 error
= SET_ERROR(ENOENT
);
891 sa
->sa_user_table
[i
] = (sa_attr_type_t
)sa_attr_count
;
895 sa
->sa_user_table
[i
] = ATTR_NUM(attr_value
);
902 sa
->sa_num_attrs
= sa_attr_count
;
903 tb
= sa
->sa_attr_table
=
904 kmem_zalloc(sizeof (sa_attr_table_t
) * sa_attr_count
, KM_SLEEP
);
907 * Attribute table is constructed from requested attribute list,
908 * previously foreign registered attributes, and also the legacy
909 * ZPL set of attributes.
912 if (sa
->sa_reg_attr_obj
) {
913 for (zap_cursor_init(&zc
, os
, sa
->sa_reg_attr_obj
);
914 (error
= zap_cursor_retrieve(&zc
, &za
)) == 0;
915 zap_cursor_advance(&zc
)) {
917 value
= za
.za_first_integer
;
920 tb
[ATTR_NUM(value
)].sa_attr
= ATTR_NUM(value
);
921 tb
[ATTR_NUM(value
)].sa_length
= ATTR_LENGTH(value
);
922 tb
[ATTR_NUM(value
)].sa_byteswap
= ATTR_BSWAP(value
);
923 tb
[ATTR_NUM(value
)].sa_registered
= B_TRUE
;
925 if (tb
[ATTR_NUM(value
)].sa_name
) {
928 tb
[ATTR_NUM(value
)].sa_name
=
929 kmem_zalloc(strlen(za
.za_name
) +1, KM_SLEEP
);
930 (void) strlcpy(tb
[ATTR_NUM(value
)].sa_name
, za
.za_name
,
931 strlen(za
.za_name
) +1);
933 zap_cursor_fini(&zc
);
935 * Make sure we processed the correct number of registered
938 if (registered_count
!= sa_reg_count
) {
945 if (ostype
== DMU_OST_ZFS
) {
946 for (i
= 0; i
!= sa_legacy_attr_count
; i
++) {
949 tb
[i
].sa_attr
= sa_legacy_attrs
[i
].sa_attr
;
950 tb
[i
].sa_length
= sa_legacy_attrs
[i
].sa_length
;
951 tb
[i
].sa_byteswap
= sa_legacy_attrs
[i
].sa_byteswap
;
952 tb
[i
].sa_registered
= B_FALSE
;
954 kmem_zalloc(strlen(sa_legacy_attrs
[i
].sa_name
) +1,
956 (void) strlcpy(tb
[i
].sa_name
,
957 sa_legacy_attrs
[i
].sa_name
,
958 strlen(sa_legacy_attrs
[i
].sa_name
) + 1);
962 for (i
= 0; i
!= count
; i
++) {
963 sa_attr_type_t attr_id
;
965 attr_id
= sa
->sa_user_table
[i
];
966 if (tb
[attr_id
].sa_name
)
969 tb
[attr_id
].sa_length
= reg_attrs
[i
].sa_length
;
970 tb
[attr_id
].sa_byteswap
= reg_attrs
[i
].sa_byteswap
;
971 tb
[attr_id
].sa_attr
= attr_id
;
972 tb
[attr_id
].sa_name
=
973 kmem_zalloc(strlen(reg_attrs
[i
].sa_name
) + 1, KM_SLEEP
);
974 (void) strlcpy(tb
[attr_id
].sa_name
, reg_attrs
[i
].sa_name
,
975 strlen(reg_attrs
[i
].sa_name
) + 1);
978 sa
->sa_need_attr_registration
=
979 (sa_attr_count
!= registered_count
);
983 kmem_free(sa
->sa_user_table
, count
* sizeof (sa_attr_type_t
));
984 sa
->sa_user_table
= NULL
;
985 sa_free_attr_table(sa
);
991 sa_setup(objset_t
*os
, uint64_t sa_obj
, sa_attr_reg_t
*reg_attrs
, int count
,
992 sa_attr_type_t
**user_table
)
997 dmu_objset_type_t ostype
= dmu_objset_type(os
);
1001 mutex_enter(&os
->os_user_ptr_lock
);
1003 mutex_enter(&os
->os_sa
->sa_lock
);
1004 mutex_exit(&os
->os_user_ptr_lock
);
1005 tb
= os
->os_sa
->sa_user_table
;
1006 mutex_exit(&os
->os_sa
->sa_lock
);
1011 sa
= kmem_zalloc(sizeof (sa_os_t
), KM_SLEEP
);
1012 mutex_init(&sa
->sa_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1013 sa
->sa_master_obj
= sa_obj
;
1016 mutex_enter(&sa
->sa_lock
);
1017 mutex_exit(&os
->os_user_ptr_lock
);
1018 avl_create(&sa
->sa_layout_num_tree
, layout_num_compare
,
1019 sizeof (sa_lot_t
), offsetof(sa_lot_t
, lot_num_node
));
1020 avl_create(&sa
->sa_layout_hash_tree
, layout_hash_compare
,
1021 sizeof (sa_lot_t
), offsetof(sa_lot_t
, lot_hash_node
));
1024 error
= zap_lookup(os
, sa_obj
, SA_LAYOUTS
,
1025 8, 1, &sa
->sa_layout_attr_obj
);
1026 if (error
!= 0 && error
!= ENOENT
)
1028 error
= zap_lookup(os
, sa_obj
, SA_REGISTRY
,
1029 8, 1, &sa
->sa_reg_attr_obj
);
1030 if (error
!= 0 && error
!= ENOENT
)
1034 if ((error
= sa_attr_table_setup(os
, reg_attrs
, count
)) != 0)
1037 if (sa
->sa_layout_attr_obj
!= 0) {
1038 uint64_t layout_count
;
1040 error
= zap_count(os
, sa
->sa_layout_attr_obj
,
1044 * Layout number count should be > 0
1046 if (error
|| (error
== 0 && layout_count
== 0)) {
1048 error
= SET_ERROR(EINVAL
);
1052 for (zap_cursor_init(&zc
, os
, sa
->sa_layout_attr_obj
);
1053 (error
= zap_cursor_retrieve(&zc
, &za
)) == 0;
1054 zap_cursor_advance(&zc
)) {
1055 sa_attr_type_t
*lot_attrs
;
1058 lot_attrs
= kmem_zalloc(sizeof (sa_attr_type_t
) *
1059 za
.za_num_integers
, KM_SLEEP
);
1061 if ((error
= (zap_lookup(os
, sa
->sa_layout_attr_obj
,
1062 za
.za_name
, 2, za
.za_num_integers
,
1063 lot_attrs
))) != 0) {
1064 kmem_free(lot_attrs
, sizeof (sa_attr_type_t
) *
1065 za
.za_num_integers
);
1068 VERIFY(ddi_strtoull(za
.za_name
, NULL
, 10,
1069 (unsigned long long *)&lot_num
) == 0);
1071 (void) sa_add_layout_entry(os
, lot_attrs
,
1072 za
.za_num_integers
, lot_num
,
1073 sa_layout_info_hash(lot_attrs
,
1074 za
.za_num_integers
), B_FALSE
, NULL
);
1075 kmem_free(lot_attrs
, sizeof (sa_attr_type_t
) *
1076 za
.za_num_integers
);
1078 zap_cursor_fini(&zc
);
1081 * Make sure layout count matches number of entries added
1084 if (avl_numnodes(&sa
->sa_layout_num_tree
) != layout_count
) {
1090 /* Add special layout number for old ZNODES */
1091 if (ostype
== DMU_OST_ZFS
) {
1092 (void) sa_add_layout_entry(os
, sa_legacy_zpl_layout
,
1093 sa_legacy_attr_count
, 0,
1094 sa_layout_info_hash(sa_legacy_zpl_layout
,
1095 sa_legacy_attr_count
), B_FALSE
, NULL
);
1097 (void) sa_add_layout_entry(os
, sa_dummy_zpl_layout
, 0, 1,
1100 *user_table
= os
->os_sa
->sa_user_table
;
1101 mutex_exit(&sa
->sa_lock
);
1105 sa_free_attr_table(sa
);
1106 if (sa
->sa_user_table
)
1107 kmem_free(sa
->sa_user_table
, sa
->sa_user_table_sz
);
1108 mutex_exit(&sa
->sa_lock
);
1109 avl_destroy(&sa
->sa_layout_hash_tree
);
1110 avl_destroy(&sa
->sa_layout_num_tree
);
1111 mutex_destroy(&sa
->sa_lock
);
1112 kmem_free(sa
, sizeof (sa_os_t
));
1113 return ((error
== ECKSUM
) ? EIO
: error
);
1117 sa_tear_down(objset_t
*os
)
1119 sa_os_t
*sa
= os
->os_sa
;
1123 kmem_free(sa
->sa_user_table
, sa
->sa_user_table_sz
);
1125 /* Free up attr table */
1127 sa_free_attr_table(sa
);
1131 avl_destroy_nodes(&sa
->sa_layout_hash_tree
, &cookie
))) {
1133 while ((tab
= list_head(&layout
->lot_idx_tab
))) {
1134 ASSERT(refcount_count(&tab
->sa_refcount
));
1135 sa_idx_tab_rele(os
, tab
);
1140 while ((layout
= avl_destroy_nodes(&sa
->sa_layout_num_tree
, &cookie
))) {
1141 kmem_free(layout
->lot_attrs
,
1142 sizeof (sa_attr_type_t
) * layout
->lot_attr_count
);
1143 kmem_free(layout
, sizeof (sa_lot_t
));
1146 avl_destroy(&sa
->sa_layout_hash_tree
);
1147 avl_destroy(&sa
->sa_layout_num_tree
);
1148 mutex_destroy(&sa
->sa_lock
);
1150 kmem_free(sa
, sizeof (sa_os_t
));
1155 sa_build_idx_tab(void *hdr
, void *attr_addr
, sa_attr_type_t attr
,
1156 uint16_t length
, int length_idx
, boolean_t var_length
, void *userp
)
1158 sa_idx_tab_t
*idx_tab
= userp
;
1161 ASSERT(idx_tab
->sa_variable_lengths
);
1162 idx_tab
->sa_variable_lengths
[length_idx
] = length
;
1164 TOC_ATTR_ENCODE(idx_tab
->sa_idx_tab
[attr
], length_idx
,
1165 (uint32_t)((uintptr_t)attr_addr
- (uintptr_t)hdr
));
1169 sa_attr_iter(objset_t
*os
, sa_hdr_phys_t
*hdr
, dmu_object_type_t type
,
1170 sa_iterfunc_t func
, sa_lot_t
*tab
, void *userp
)
1176 sa_os_t
*sa
= os
->os_sa
;
1178 uint16_t *length_start
= NULL
;
1179 uint8_t length_idx
= 0;
1182 search
.lot_num
= SA_LAYOUT_NUM(hdr
, type
);
1183 tb
= avl_find(&sa
->sa_layout_num_tree
, &search
, &loc
);
1187 if (IS_SA_BONUSTYPE(type
)) {
1188 data_start
= (void *)P2ROUNDUP(((uintptr_t)hdr
+
1189 offsetof(sa_hdr_phys_t
, sa_lengths
) +
1190 (sizeof (uint16_t) * tb
->lot_var_sizes
)), 8);
1191 length_start
= hdr
->sa_lengths
;
1196 for (i
= 0; i
!= tb
->lot_attr_count
; i
++) {
1197 int attr_length
, reg_length
;
1200 reg_length
= sa
->sa_attr_table
[tb
->lot_attrs
[i
]].sa_length
;
1202 attr_length
= reg_length
;
1205 attr_length
= length_start
[length_idx
];
1206 idx_len
= length_idx
++;
1209 func(hdr
, data_start
, tb
->lot_attrs
[i
], attr_length
,
1210 idx_len
, reg_length
== 0 ? B_TRUE
: B_FALSE
, userp
);
1212 data_start
= (void *)P2ROUNDUP(((uintptr_t)data_start
+
1219 sa_byteswap_cb(void *hdr
, void *attr_addr
, sa_attr_type_t attr
,
1220 uint16_t length
, int length_idx
, boolean_t variable_length
, void *userp
)
1222 sa_handle_t
*hdl
= userp
;
1223 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1225 sa_bswap_table
[sa
->sa_attr_table
[attr
].sa_byteswap
](attr_addr
, length
);
1229 sa_byteswap(sa_handle_t
*hdl
, sa_buf_type_t buftype
)
1231 sa_hdr_phys_t
*sa_hdr_phys
= SA_GET_HDR(hdl
, buftype
);
1233 int num_lengths
= 1;
1235 ASSERTV(sa_os_t
*sa
= hdl
->sa_os
->os_sa
);
1237 ASSERT(MUTEX_HELD(&sa
->sa_lock
));
1238 if (sa_hdr_phys
->sa_magic
== SA_MAGIC
)
1241 db
= SA_GET_DB(hdl
, buftype
);
1243 if (buftype
== SA_SPILL
) {
1244 arc_release(db
->db_buf
, NULL
);
1245 arc_buf_thaw(db
->db_buf
);
1248 sa_hdr_phys
->sa_magic
= BSWAP_32(sa_hdr_phys
->sa_magic
);
1249 sa_hdr_phys
->sa_layout_info
= BSWAP_16(sa_hdr_phys
->sa_layout_info
);
1252 * Determine number of variable lengths in header
1253 * The standard 8 byte header has one for free and a
1254 * 16 byte header would have 4 + 1;
1256 if (SA_HDR_SIZE(sa_hdr_phys
) > 8)
1257 num_lengths
+= (SA_HDR_SIZE(sa_hdr_phys
) - 8) >> 1;
1258 for (i
= 0; i
!= num_lengths
; i
++)
1259 sa_hdr_phys
->sa_lengths
[i
] =
1260 BSWAP_16(sa_hdr_phys
->sa_lengths
[i
]);
1262 sa_attr_iter(hdl
->sa_os
, sa_hdr_phys
, DMU_OT_SA
,
1263 sa_byteswap_cb
, NULL
, hdl
);
1265 if (buftype
== SA_SPILL
)
1266 arc_buf_freeze(((dmu_buf_impl_t
*)hdl
->sa_spill
)->db_buf
);
1270 sa_build_index(sa_handle_t
*hdl
, sa_buf_type_t buftype
)
1272 sa_hdr_phys_t
*sa_hdr_phys
;
1273 dmu_buf_impl_t
*db
= SA_GET_DB(hdl
, buftype
);
1274 dmu_object_type_t bonustype
= SA_BONUSTYPE_FROM_DB(db
);
1275 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1276 sa_idx_tab_t
*idx_tab
;
1278 sa_hdr_phys
= SA_GET_HDR(hdl
, buftype
);
1280 mutex_enter(&sa
->sa_lock
);
1282 /* Do we need to byteswap? */
1284 /* only check if not old znode */
1285 if (IS_SA_BONUSTYPE(bonustype
) && sa_hdr_phys
->sa_magic
!= SA_MAGIC
&&
1286 sa_hdr_phys
->sa_magic
!= 0) {
1287 VERIFY(BSWAP_32(sa_hdr_phys
->sa_magic
) == SA_MAGIC
);
1288 sa_byteswap(hdl
, buftype
);
1291 idx_tab
= sa_find_idx_tab(hdl
->sa_os
, bonustype
, sa_hdr_phys
);
1293 if (buftype
== SA_BONUS
)
1294 hdl
->sa_bonus_tab
= idx_tab
;
1296 hdl
->sa_spill_tab
= idx_tab
;
1298 mutex_exit(&sa
->sa_lock
);
1304 sa_evict_sync(void *dbu
)
1306 panic("evicting sa dbuf\n");
1310 sa_idx_tab_rele(objset_t
*os
, void *arg
)
1312 sa_os_t
*sa
= os
->os_sa
;
1313 sa_idx_tab_t
*idx_tab
= arg
;
1315 if (idx_tab
== NULL
)
1318 mutex_enter(&sa
->sa_lock
);
1319 if (refcount_remove(&idx_tab
->sa_refcount
, NULL
) == 0) {
1320 list_remove(&idx_tab
->sa_layout
->lot_idx_tab
, idx_tab
);
1321 if (idx_tab
->sa_variable_lengths
)
1322 kmem_free(idx_tab
->sa_variable_lengths
,
1324 idx_tab
->sa_layout
->lot_var_sizes
);
1325 refcount_destroy(&idx_tab
->sa_refcount
);
1326 kmem_free(idx_tab
->sa_idx_tab
,
1327 sizeof (uint32_t) * sa
->sa_num_attrs
);
1328 kmem_free(idx_tab
, sizeof (sa_idx_tab_t
));
1330 mutex_exit(&sa
->sa_lock
);
1334 sa_idx_tab_hold(objset_t
*os
, sa_idx_tab_t
*idx_tab
)
1336 ASSERTV(sa_os_t
*sa
= os
->os_sa
);
1338 ASSERT(MUTEX_HELD(&sa
->sa_lock
));
1339 (void) refcount_add(&idx_tab
->sa_refcount
, NULL
);
1343 sa_spill_rele(sa_handle_t
*hdl
)
1345 mutex_enter(&hdl
->sa_lock
);
1346 if (hdl
->sa_spill
) {
1347 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
1348 dmu_buf_rele(hdl
->sa_spill
, NULL
);
1349 hdl
->sa_spill
= NULL
;
1350 hdl
->sa_spill_tab
= NULL
;
1352 mutex_exit(&hdl
->sa_lock
);
1356 sa_handle_destroy(sa_handle_t
*hdl
)
1358 dmu_buf_t
*db
= hdl
->sa_bonus
;
1360 mutex_enter(&hdl
->sa_lock
);
1361 (void) dmu_buf_remove_user(db
, &hdl
->sa_dbu
);
1363 if (hdl
->sa_bonus_tab
)
1364 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_bonus_tab
);
1366 if (hdl
->sa_spill_tab
)
1367 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
1369 dmu_buf_rele(hdl
->sa_bonus
, NULL
);
1372 dmu_buf_rele((dmu_buf_t
*)hdl
->sa_spill
, NULL
);
1373 mutex_exit(&hdl
->sa_lock
);
1375 kmem_cache_free(sa_cache
, hdl
);
1379 sa_handle_get_from_db(objset_t
*os
, dmu_buf_t
*db
, void *userp
,
1380 sa_handle_type_t hdl_type
, sa_handle_t
**handlepp
)
1383 sa_handle_t
*handle
= NULL
;
1385 dmu_object_info_t doi
;
1387 dmu_object_info_from_db(db
, &doi
);
1388 ASSERT(doi
.doi_bonus_type
== DMU_OT_SA
||
1389 doi
.doi_bonus_type
== DMU_OT_ZNODE
);
1391 /* find handle, if it exists */
1392 /* if one doesn't exist then create a new one, and initialize it */
1394 if (hdl_type
== SA_HDL_SHARED
)
1395 handle
= dmu_buf_get_user(db
);
1397 if (handle
== NULL
) {
1398 sa_handle_t
*winner
= NULL
;
1400 handle
= kmem_cache_alloc(sa_cache
, KM_SLEEP
);
1401 handle
->sa_dbu
.dbu_evict_func_sync
= NULL
;
1402 handle
->sa_dbu
.dbu_evict_func_async
= NULL
;
1403 handle
->sa_userp
= userp
;
1404 handle
->sa_bonus
= db
;
1406 handle
->sa_spill
= NULL
;
1407 handle
->sa_bonus_tab
= NULL
;
1408 handle
->sa_spill_tab
= NULL
;
1410 error
= sa_build_index(handle
, SA_BONUS
);
1412 if (hdl_type
== SA_HDL_SHARED
) {
1413 dmu_buf_init_user(&handle
->sa_dbu
, sa_evict_sync
, NULL
,
1415 winner
= dmu_buf_set_user_ie(db
, &handle
->sa_dbu
);
1418 if (winner
!= NULL
) {
1419 kmem_cache_free(sa_cache
, handle
);
1429 sa_handle_get(objset_t
*objset
, uint64_t objid
, void *userp
,
1430 sa_handle_type_t hdl_type
, sa_handle_t
**handlepp
)
1435 if ((error
= dmu_bonus_hold(objset
, objid
, NULL
, &db
)))
1438 return (sa_handle_get_from_db(objset
, db
, userp
, hdl_type
,
1443 sa_buf_hold(objset_t
*objset
, uint64_t obj_num
, void *tag
, dmu_buf_t
**db
)
1445 return (dmu_bonus_hold(objset
, obj_num
, tag
, db
));
1449 sa_buf_rele(dmu_buf_t
*db
, void *tag
)
1451 dmu_buf_rele(db
, tag
);
1455 sa_lookup_impl(sa_handle_t
*hdl
, sa_bulk_attr_t
*bulk
, int count
)
1458 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
1459 return (sa_attr_op(hdl
, bulk
, count
, SA_LOOKUP
, NULL
));
1463 sa_lookup_locked(sa_handle_t
*hdl
, sa_attr_type_t attr
, void *buf
,
1467 sa_bulk_attr_t bulk
;
1469 VERIFY3U(buflen
, <=, SA_ATTR_MAX_LEN
);
1471 bulk
.sa_attr
= attr
;
1473 bulk
.sa_length
= buflen
;
1474 bulk
.sa_data_func
= NULL
;
1477 error
= sa_lookup_impl(hdl
, &bulk
, 1);
1482 sa_lookup(sa_handle_t
*hdl
, sa_attr_type_t attr
, void *buf
, uint32_t buflen
)
1486 mutex_enter(&hdl
->sa_lock
);
1487 error
= sa_lookup_locked(hdl
, attr
, buf
, buflen
);
1488 mutex_exit(&hdl
->sa_lock
);
1495 sa_lookup_uio(sa_handle_t
*hdl
, sa_attr_type_t attr
, uio_t
*uio
)
1498 sa_bulk_attr_t bulk
;
1500 bulk
.sa_data
= NULL
;
1501 bulk
.sa_attr
= attr
;
1502 bulk
.sa_data_func
= NULL
;
1506 mutex_enter(&hdl
->sa_lock
);
1507 if ((error
= sa_attr_op(hdl
, &bulk
, 1, SA_LOOKUP
, NULL
)) == 0) {
1508 error
= uiomove((void *)bulk
.sa_addr
, MIN(bulk
.sa_size
,
1509 uio
->uio_resid
), UIO_READ
, uio
);
1511 mutex_exit(&hdl
->sa_lock
);
1516 * For the existed object that is upgraded from old system, its ondisk layout
1517 * has no slot for the project ID attribute. But quota accounting logic needs
1518 * to access related slots by offset directly. So we need to adjust these old
1519 * objects' layout to make the project ID to some unified and fixed offset.
1522 sa_add_projid(sa_handle_t
*hdl
, dmu_tx_t
*tx
, uint64_t projid
)
1524 znode_t
*zp
= sa_get_userdata(hdl
);
1525 dmu_buf_t
*db
= sa_get_db(hdl
);
1526 zfsvfs_t
*zfsvfs
= ZTOZSB(zp
);
1527 int count
= 0, err
= 0;
1528 sa_bulk_attr_t
*bulk
, *attrs
;
1529 zfs_acl_locator_cb_t locate
= { 0 };
1530 uint64_t uid
, gid
, mode
, rdev
, xattr
= 0, parent
, gen
, links
;
1531 uint64_t crtime
[2], mtime
[2], ctime
[2], atime
[2];
1532 zfs_acl_phys_t znode_acl
= { 0 };
1533 char scanstamp
[AV_SCANSTAMP_SZ
];
1535 if (zp
->z_acl_cached
== NULL
) {
1538 mutex_enter(&zp
->z_acl_lock
);
1539 err
= zfs_acl_node_read(zp
, B_FALSE
, &aclp
, B_FALSE
);
1540 mutex_exit(&zp
->z_acl_lock
);
1541 if (err
!= 0 && err
!= ENOENT
)
1545 bulk
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * ZPL_END
, KM_SLEEP
);
1546 attrs
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * ZPL_END
, KM_SLEEP
);
1547 mutex_enter(&hdl
->sa_lock
);
1548 mutex_enter(&zp
->z_lock
);
1550 err
= sa_lookup_locked(hdl
, SA_ZPL_PROJID(zfsvfs
), &projid
,
1552 if (unlikely(err
== 0))
1553 /* Someone has added project ID attr by race. */
1558 /* First do a bulk query of the attributes that aren't cached */
1560 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
), NULL
,
1562 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GEN(zfsvfs
), NULL
,
1564 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_UID(zfsvfs
), NULL
,
1566 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GID(zfsvfs
), NULL
,
1568 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
,
1570 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
,
1572 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
,
1574 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
,
1576 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1578 if (S_ISBLK(ZTOI(zp
)->i_mode
) || S_ISCHR(ZTOI(zp
)->i_mode
))
1579 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1582 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
,
1584 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
,
1586 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
,
1588 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1590 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GEN(zfsvfs
), NULL
,
1592 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_MODE(zfsvfs
), NULL
,
1594 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
,
1596 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_XATTR(zfsvfs
), NULL
,
1598 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1600 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_UID(zfsvfs
), NULL
,
1602 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_GID(zfsvfs
), NULL
,
1604 SA_ADD_BULK_ATTR(bulk
, count
, SA_ZPL_ZNODE_ACL(zfsvfs
), NULL
,
1607 err
= sa_bulk_lookup_locked(hdl
, bulk
, count
);
1611 err
= sa_lookup_locked(hdl
, SA_ZPL_XATTR(zfsvfs
), &xattr
, 8);
1612 if (err
!= 0 && err
!= ENOENT
)
1615 zp
->z_projid
= projid
;
1616 zp
->z_pflags
|= ZFS_PROJID
;
1617 links
= ZTOI(zp
)->i_nlink
;
1621 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_MODE(zfsvfs
), NULL
, &mode
, 8);
1622 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_SIZE(zfsvfs
), NULL
,
1624 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_GEN(zfsvfs
), NULL
, &gen
, 8);
1625 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_UID(zfsvfs
), NULL
, &uid
, 8);
1626 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_GID(zfsvfs
), NULL
, &gid
, 8);
1627 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_PARENT(zfsvfs
), NULL
, &parent
, 8);
1628 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_FLAGS(zfsvfs
), NULL
,
1630 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_ATIME(zfsvfs
), NULL
, &atime
, 16);
1631 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_MTIME(zfsvfs
), NULL
, &mtime
, 16);
1632 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_CTIME(zfsvfs
), NULL
, &ctime
, 16);
1633 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_CRTIME(zfsvfs
), NULL
,
1635 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_LINKS(zfsvfs
), NULL
, &links
, 8);
1636 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_PROJID(zfsvfs
), NULL
, &projid
, 8);
1638 if (S_ISBLK(ZTOI(zp
)->i_mode
) || S_ISCHR(ZTOI(zp
)->i_mode
))
1639 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_RDEV(zfsvfs
), NULL
,
1642 if (zp
->z_acl_cached
!= NULL
) {
1643 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_DACL_COUNT(zfsvfs
), NULL
,
1644 &zp
->z_acl_cached
->z_acl_count
, 8);
1645 if (zp
->z_acl_cached
->z_version
< ZFS_ACL_VERSION_FUID
)
1646 zfs_acl_xform(zp
, zp
->z_acl_cached
, CRED());
1647 locate
.cb_aclp
= zp
->z_acl_cached
;
1648 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_DACL_ACES(zfsvfs
),
1649 zfs_acl_data_locator
, &locate
,
1650 zp
->z_acl_cached
->z_acl_bytes
);
1654 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_XATTR(zfsvfs
), NULL
,
1657 if (zp
->z_pflags
& ZFS_BONUS_SCANSTAMP
) {
1658 bcopy((caddr_t
)db
->db_data
+ ZFS_OLD_ZNODE_PHYS_SIZE
,
1659 scanstamp
, AV_SCANSTAMP_SZ
);
1660 SA_ADD_BULK_ATTR(attrs
, count
, SA_ZPL_SCANSTAMP(zfsvfs
), NULL
,
1661 scanstamp
, AV_SCANSTAMP_SZ
);
1662 zp
->z_pflags
&= ~ZFS_BONUS_SCANSTAMP
;
1665 VERIFY(dmu_set_bonustype(db
, DMU_OT_SA
, tx
) == 0);
1666 VERIFY(sa_replace_all_by_template_locked(hdl
, attrs
, count
, tx
) == 0);
1667 if (znode_acl
.z_acl_extern_obj
) {
1668 VERIFY(0 == dmu_object_free(zfsvfs
->z_os
,
1669 znode_acl
.z_acl_extern_obj
, tx
));
1672 zp
->z_is_sa
= B_TRUE
;
1675 mutex_exit(&zp
->z_lock
);
1676 mutex_exit(&hdl
->sa_lock
);
1677 kmem_free(attrs
, sizeof (sa_bulk_attr_t
) * ZPL_END
);
1678 kmem_free(bulk
, sizeof (sa_bulk_attr_t
) * ZPL_END
);
1683 static sa_idx_tab_t
*
1684 sa_find_idx_tab(objset_t
*os
, dmu_object_type_t bonustype
, sa_hdr_phys_t
*hdr
)
1686 sa_idx_tab_t
*idx_tab
;
1687 sa_os_t
*sa
= os
->os_sa
;
1688 sa_lot_t
*tb
, search
;
1692 * Deterimine layout number. If SA node and header == 0 then
1693 * force the index table to the dummy "1" empty layout.
1695 * The layout number would only be zero for a newly created file
1696 * that has not added any attributes yet, or with crypto enabled which
1697 * doesn't write any attributes to the bonus buffer.
1700 search
.lot_num
= SA_LAYOUT_NUM(hdr
, bonustype
);
1702 tb
= avl_find(&sa
->sa_layout_num_tree
, &search
, &loc
);
1704 /* Verify header size is consistent with layout information */
1706 ASSERT((IS_SA_BONUSTYPE(bonustype
) &&
1707 SA_HDR_SIZE_MATCH_LAYOUT(hdr
, tb
)) || !IS_SA_BONUSTYPE(bonustype
) ||
1708 (IS_SA_BONUSTYPE(bonustype
) && hdr
->sa_layout_info
== 0));
1711 * See if any of the already existing TOC entries can be reused?
1714 for (idx_tab
= list_head(&tb
->lot_idx_tab
); idx_tab
;
1715 idx_tab
= list_next(&tb
->lot_idx_tab
, idx_tab
)) {
1716 boolean_t valid_idx
= B_TRUE
;
1719 if (tb
->lot_var_sizes
!= 0 &&
1720 idx_tab
->sa_variable_lengths
!= NULL
) {
1721 for (i
= 0; i
!= tb
->lot_var_sizes
; i
++) {
1722 if (hdr
->sa_lengths
[i
] !=
1723 idx_tab
->sa_variable_lengths
[i
]) {
1724 valid_idx
= B_FALSE
;
1730 sa_idx_tab_hold(os
, idx_tab
);
1735 /* No such luck, create a new entry */
1736 idx_tab
= kmem_zalloc(sizeof (sa_idx_tab_t
), KM_SLEEP
);
1737 idx_tab
->sa_idx_tab
=
1738 kmem_zalloc(sizeof (uint32_t) * sa
->sa_num_attrs
, KM_SLEEP
);
1739 idx_tab
->sa_layout
= tb
;
1740 refcount_create(&idx_tab
->sa_refcount
);
1741 if (tb
->lot_var_sizes
)
1742 idx_tab
->sa_variable_lengths
= kmem_alloc(sizeof (uint16_t) *
1743 tb
->lot_var_sizes
, KM_SLEEP
);
1745 sa_attr_iter(os
, hdr
, bonustype
, sa_build_idx_tab
,
1747 sa_idx_tab_hold(os
, idx_tab
); /* one hold for consumer */
1748 sa_idx_tab_hold(os
, idx_tab
); /* one for layout */
1749 list_insert_tail(&tb
->lot_idx_tab
, idx_tab
);
1754 sa_default_locator(void **dataptr
, uint32_t *len
, uint32_t total_len
,
1755 boolean_t start
, void *userdata
)
1759 *dataptr
= userdata
;
1764 sa_attr_register_sync(sa_handle_t
*hdl
, dmu_tx_t
*tx
)
1766 uint64_t attr_value
= 0;
1767 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1768 sa_attr_table_t
*tb
= sa
->sa_attr_table
;
1771 mutex_enter(&sa
->sa_lock
);
1773 if (!sa
->sa_need_attr_registration
|| sa
->sa_master_obj
== 0) {
1774 mutex_exit(&sa
->sa_lock
);
1778 if (sa
->sa_reg_attr_obj
== 0) {
1779 sa
->sa_reg_attr_obj
= zap_create_link(hdl
->sa_os
,
1780 DMU_OT_SA_ATTR_REGISTRATION
,
1781 sa
->sa_master_obj
, SA_REGISTRY
, tx
);
1783 for (i
= 0; i
!= sa
->sa_num_attrs
; i
++) {
1784 if (sa
->sa_attr_table
[i
].sa_registered
)
1786 ATTR_ENCODE(attr_value
, tb
[i
].sa_attr
, tb
[i
].sa_length
,
1788 VERIFY(0 == zap_update(hdl
->sa_os
, sa
->sa_reg_attr_obj
,
1789 tb
[i
].sa_name
, 8, 1, &attr_value
, tx
));
1790 tb
[i
].sa_registered
= B_TRUE
;
1792 sa
->sa_need_attr_registration
= B_FALSE
;
1793 mutex_exit(&sa
->sa_lock
);
1797 * Replace all attributes with attributes specified in template.
1798 * If dnode had a spill buffer then those attributes will be
1799 * also be replaced, possibly with just an empty spill block
1801 * This interface is intended to only be used for bulk adding of
1802 * attributes for a new file. It will also be used by the ZPL
1803 * when converting and old formatted znode to native SA support.
1806 sa_replace_all_by_template_locked(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
,
1807 int attr_count
, dmu_tx_t
*tx
)
1809 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1811 if (sa
->sa_need_attr_registration
)
1812 sa_attr_register_sync(hdl
, tx
);
1813 return (sa_build_layouts(hdl
, attr_desc
, attr_count
, tx
));
1817 sa_replace_all_by_template(sa_handle_t
*hdl
, sa_bulk_attr_t
*attr_desc
,
1818 int attr_count
, dmu_tx_t
*tx
)
1822 mutex_enter(&hdl
->sa_lock
);
1823 error
= sa_replace_all_by_template_locked(hdl
, attr_desc
,
1825 mutex_exit(&hdl
->sa_lock
);
1830 * Add/remove a single attribute or replace a variable-sized attribute value
1831 * with a value of a different size, and then rewrite the entire set
1833 * Same-length attribute value replacement (including fixed-length attributes)
1834 * is handled more efficiently by the upper layers.
1837 sa_modify_attrs(sa_handle_t
*hdl
, sa_attr_type_t newattr
,
1838 sa_data_op_t action
, sa_data_locator_t
*locator
, void *datastart
,
1839 uint16_t buflen
, dmu_tx_t
*tx
)
1841 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1842 dmu_buf_impl_t
*db
= (dmu_buf_impl_t
*)hdl
->sa_bonus
;
1844 sa_bulk_attr_t
*attr_desc
;
1846 int bonus_attr_count
= 0;
1847 int bonus_data_size
= 0;
1848 int spill_data_size
= 0;
1849 int spill_attr_count
= 0;
1851 uint16_t length
, reg_length
;
1852 int i
, j
, k
, length_idx
;
1854 sa_idx_tab_t
*idx_tab
;
1858 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
1860 /* First make of copy of the old data */
1864 if (dn
->dn_bonuslen
!= 0) {
1865 bonus_data_size
= hdl
->sa_bonus
->db_size
;
1866 old_data
[0] = kmem_alloc(bonus_data_size
, KM_SLEEP
);
1867 bcopy(hdl
->sa_bonus
->db_data
, old_data
[0],
1868 hdl
->sa_bonus
->db_size
);
1869 bonus_attr_count
= hdl
->sa_bonus_tab
->sa_layout
->lot_attr_count
;
1875 /* Bring spill buffer online if it isn't currently */
1877 if ((error
= sa_get_spill(hdl
)) == 0) {
1878 spill_data_size
= hdl
->sa_spill
->db_size
;
1879 old_data
[1] = vmem_alloc(spill_data_size
, KM_SLEEP
);
1880 bcopy(hdl
->sa_spill
->db_data
, old_data
[1],
1881 hdl
->sa_spill
->db_size
);
1883 hdl
->sa_spill_tab
->sa_layout
->lot_attr_count
;
1884 } else if (error
&& error
!= ENOENT
) {
1886 kmem_free(old_data
[0], bonus_data_size
);
1892 /* build descriptor of all attributes */
1894 attr_count
= bonus_attr_count
+ spill_attr_count
;
1895 if (action
== SA_ADD
)
1897 else if (action
== SA_REMOVE
)
1900 attr_desc
= kmem_zalloc(sizeof (sa_bulk_attr_t
) * attr_count
, KM_SLEEP
);
1903 * loop through bonus and spill buffer if it exists, and
1904 * build up new attr_descriptor to reset the attributes
1907 count
= bonus_attr_count
;
1908 hdr
= SA_GET_HDR(hdl
, SA_BONUS
);
1909 idx_tab
= SA_IDX_TAB_GET(hdl
, SA_BONUS
);
1910 for (; k
!= 2; k
++) {
1912 * Iterate over each attribute in layout. Fetch the
1913 * size of variable-length attributes needing rewrite
1914 * from sa_lengths[].
1916 for (i
= 0, length_idx
= 0; i
!= count
; i
++) {
1917 sa_attr_type_t attr
;
1919 attr
= idx_tab
->sa_layout
->lot_attrs
[i
];
1920 reg_length
= SA_REGISTERED_LEN(sa
, attr
);
1921 if (reg_length
== 0) {
1922 length
= hdr
->sa_lengths
[length_idx
];
1925 length
= reg_length
;
1927 if (attr
== newattr
) {
1929 * There is nothing to do for SA_REMOVE,
1930 * so it is just skipped.
1932 if (action
== SA_REMOVE
)
1936 * Duplicate attributes are not allowed, so the
1937 * action can not be SA_ADD here.
1939 ASSERT3S(action
, ==, SA_REPLACE
);
1942 * Only a variable-sized attribute can be
1943 * replaced here, and its size must be changing.
1945 ASSERT3U(reg_length
, ==, 0);
1946 ASSERT3U(length
, !=, buflen
);
1947 SA_ADD_BULK_ATTR(attr_desc
, j
, attr
,
1948 locator
, datastart
, buflen
);
1950 SA_ADD_BULK_ATTR(attr_desc
, j
, attr
,
1952 (TOC_OFF(idx_tab
->sa_idx_tab
[attr
]) +
1953 (uintptr_t)old_data
[k
]), length
);
1956 if (k
== 0 && hdl
->sa_spill
) {
1957 hdr
= SA_GET_HDR(hdl
, SA_SPILL
);
1958 idx_tab
= SA_IDX_TAB_GET(hdl
, SA_SPILL
);
1959 count
= spill_attr_count
;
1964 if (action
== SA_ADD
) {
1965 reg_length
= SA_REGISTERED_LEN(sa
, newattr
);
1966 IMPLY(reg_length
!= 0, reg_length
== buflen
);
1967 SA_ADD_BULK_ATTR(attr_desc
, j
, newattr
, locator
,
1970 ASSERT3U(j
, ==, attr_count
);
1972 error
= sa_build_layouts(hdl
, attr_desc
, attr_count
, tx
);
1975 kmem_free(old_data
[0], bonus_data_size
);
1977 vmem_free(old_data
[1], spill_data_size
);
1978 kmem_free(attr_desc
, sizeof (sa_bulk_attr_t
) * attr_count
);
1984 sa_bulk_update_impl(sa_handle_t
*hdl
, sa_bulk_attr_t
*bulk
, int count
,
1988 sa_os_t
*sa
= hdl
->sa_os
->os_sa
;
1989 dmu_object_type_t bonustype
;
1990 dmu_buf_t
*saved_spill
;
1993 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
1995 bonustype
= SA_BONUSTYPE_FROM_DB(SA_GET_DB(hdl
, SA_BONUS
));
1996 saved_spill
= hdl
->sa_spill
;
1998 /* sync out registration table if necessary */
1999 if (sa
->sa_need_attr_registration
)
2000 sa_attr_register_sync(hdl
, tx
);
2002 error
= sa_attr_op(hdl
, bulk
, count
, SA_UPDATE
, tx
);
2003 if (error
== 0 && !IS_SA_BONUSTYPE(bonustype
) && sa
->sa_update_cb
)
2004 sa
->sa_update_cb(hdl
, tx
);
2007 * If saved_spill is NULL and current sa_spill is not NULL that
2008 * means we increased the refcount of the spill buffer through
2009 * sa_get_spill() or dmu_spill_hold_by_dnode(). Therefore we
2010 * must release the hold before calling dmu_tx_commit() to avoid
2011 * making a copy of this buffer in dbuf_sync_leaf() due to the
2012 * reference count now being greater than 1.
2014 if (!saved_spill
&& hdl
->sa_spill
) {
2015 if (hdl
->sa_spill_tab
) {
2016 sa_idx_tab_rele(hdl
->sa_os
, hdl
->sa_spill_tab
);
2017 hdl
->sa_spill_tab
= NULL
;
2020 dmu_buf_rele((dmu_buf_t
*)hdl
->sa_spill
, NULL
);
2021 hdl
->sa_spill
= NULL
;
2028 * update or add new attribute
2031 sa_update(sa_handle_t
*hdl
, sa_attr_type_t type
,
2032 void *buf
, uint32_t buflen
, dmu_tx_t
*tx
)
2035 sa_bulk_attr_t bulk
;
2037 VERIFY3U(buflen
, <=, SA_ATTR_MAX_LEN
);
2039 bulk
.sa_attr
= type
;
2040 bulk
.sa_data_func
= NULL
;
2041 bulk
.sa_length
= buflen
;
2044 mutex_enter(&hdl
->sa_lock
);
2045 error
= sa_bulk_update_impl(hdl
, &bulk
, 1, tx
);
2046 mutex_exit(&hdl
->sa_lock
);
2051 * Return size of an attribute
2055 sa_size(sa_handle_t
*hdl
, sa_attr_type_t attr
, int *size
)
2057 sa_bulk_attr_t bulk
;
2060 bulk
.sa_data
= NULL
;
2061 bulk
.sa_attr
= attr
;
2062 bulk
.sa_data_func
= NULL
;
2065 mutex_enter(&hdl
->sa_lock
);
2066 if ((error
= sa_attr_op(hdl
, &bulk
, 1, SA_LOOKUP
, NULL
)) != 0) {
2067 mutex_exit(&hdl
->sa_lock
);
2070 *size
= bulk
.sa_size
;
2072 mutex_exit(&hdl
->sa_lock
);
2077 sa_bulk_lookup_locked(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
)
2080 ASSERT(MUTEX_HELD(&hdl
->sa_lock
));
2081 return (sa_lookup_impl(hdl
, attrs
, count
));
2085 sa_bulk_lookup(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
)
2090 mutex_enter(&hdl
->sa_lock
);
2091 error
= sa_bulk_lookup_locked(hdl
, attrs
, count
);
2092 mutex_exit(&hdl
->sa_lock
);
2097 sa_bulk_update(sa_handle_t
*hdl
, sa_bulk_attr_t
*attrs
, int count
, dmu_tx_t
*tx
)
2102 mutex_enter(&hdl
->sa_lock
);
2103 error
= sa_bulk_update_impl(hdl
, attrs
, count
, tx
);
2104 mutex_exit(&hdl
->sa_lock
);
2109 sa_remove(sa_handle_t
*hdl
, sa_attr_type_t attr
, dmu_tx_t
*tx
)
2113 mutex_enter(&hdl
->sa_lock
);
2114 error
= sa_modify_attrs(hdl
, attr
, SA_REMOVE
, NULL
,
2116 mutex_exit(&hdl
->sa_lock
);
2121 sa_object_info(sa_handle_t
*hdl
, dmu_object_info_t
*doi
)
2123 dmu_object_info_from_db((dmu_buf_t
*)hdl
->sa_bonus
, doi
);
2127 sa_object_size(sa_handle_t
*hdl
, uint32_t *blksize
, u_longlong_t
*nblocks
)
2129 dmu_object_size_from_db((dmu_buf_t
*)hdl
->sa_bonus
,
2134 sa_set_userp(sa_handle_t
*hdl
, void *ptr
)
2136 hdl
->sa_userp
= ptr
;
2140 sa_get_db(sa_handle_t
*hdl
)
2142 return ((dmu_buf_t
*)hdl
->sa_bonus
);
2146 sa_get_userdata(sa_handle_t
*hdl
)
2148 return (hdl
->sa_userp
);
2152 sa_register_update_callback_locked(objset_t
*os
, sa_update_cb_t
*func
)
2154 ASSERT(MUTEX_HELD(&os
->os_sa
->sa_lock
));
2155 os
->os_sa
->sa_update_cb
= func
;
2159 sa_register_update_callback(objset_t
*os
, sa_update_cb_t
*func
)
2162 mutex_enter(&os
->os_sa
->sa_lock
);
2163 sa_register_update_callback_locked(os
, func
);
2164 mutex_exit(&os
->os_sa
->sa_lock
);
2168 sa_handle_object(sa_handle_t
*hdl
)
2170 return (hdl
->sa_bonus
->db_object
);
2174 sa_enabled(objset_t
*os
)
2176 return (os
->os_sa
== NULL
);
2180 sa_set_sa_object(objset_t
*os
, uint64_t sa_object
)
2182 sa_os_t
*sa
= os
->os_sa
;
2184 if (sa
->sa_master_obj
)
2187 sa
->sa_master_obj
= sa_object
;
2193 sa_hdrsize(void *arg
)
2195 sa_hdr_phys_t
*hdr
= arg
;
2197 return (SA_HDR_SIZE(hdr
));
2201 sa_handle_lock(sa_handle_t
*hdl
)
2204 mutex_enter(&hdl
->sa_lock
);
2208 sa_handle_unlock(sa_handle_t
*hdl
)
2211 mutex_exit(&hdl
->sa_lock
);
2215 EXPORT_SYMBOL(sa_handle_get
);
2216 EXPORT_SYMBOL(sa_handle_get_from_db
);
2217 EXPORT_SYMBOL(sa_handle_destroy
);
2218 EXPORT_SYMBOL(sa_buf_hold
);
2219 EXPORT_SYMBOL(sa_buf_rele
);
2220 EXPORT_SYMBOL(sa_spill_rele
);
2221 EXPORT_SYMBOL(sa_lookup
);
2222 EXPORT_SYMBOL(sa_update
);
2223 EXPORT_SYMBOL(sa_remove
);
2224 EXPORT_SYMBOL(sa_bulk_lookup
);
2225 EXPORT_SYMBOL(sa_bulk_lookup_locked
);
2226 EXPORT_SYMBOL(sa_bulk_update
);
2227 EXPORT_SYMBOL(sa_size
);
2228 EXPORT_SYMBOL(sa_object_info
);
2229 EXPORT_SYMBOL(sa_object_size
);
2230 EXPORT_SYMBOL(sa_get_userdata
);
2231 EXPORT_SYMBOL(sa_set_userp
);
2232 EXPORT_SYMBOL(sa_get_db
);
2233 EXPORT_SYMBOL(sa_handle_object
);
2234 EXPORT_SYMBOL(sa_register_update_callback
);
2235 EXPORT_SYMBOL(sa_setup
);
2236 EXPORT_SYMBOL(sa_replace_all_by_template
);
2237 EXPORT_SYMBOL(sa_replace_all_by_template_locked
);
2238 EXPORT_SYMBOL(sa_enabled
);
2239 EXPORT_SYMBOL(sa_cache_init
);
2240 EXPORT_SYMBOL(sa_cache_fini
);
2241 EXPORT_SYMBOL(sa_set_sa_object
);
2242 EXPORT_SYMBOL(sa_hdrsize
);
2243 EXPORT_SYMBOL(sa_handle_lock
);
2244 EXPORT_SYMBOL(sa_handle_unlock
);
2245 EXPORT_SYMBOL(sa_lookup_uio
);
2246 EXPORT_SYMBOL(sa_add_projid
);
2247 #endif /* _KERNEL */