4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
30 #include <sys/zfs_context.h>
32 #include <sys/refcount.h>
33 #include <sys/zap_impl.h>
34 #include <sys/zap_leaf.h>
39 #include <sys/sunddi.h>
42 extern inline mzap_phys_t
*zap_m_phys(zap_t
*zap
);
44 static int mzap_upgrade(zap_t
**zapp
, dmu_tx_t
*tx
, zap_flags_t flags
);
/*
 * Return the flag word stored in the fat zap's phys header (zap_flags).
 * NOTE(review): garbled extraction -- original lines 48-50 (opening brace
 * and, presumably, a micro-zap early return of 0) are missing from this
 * view, so only the fat-zap path is visible here.  Confirm against the
 * upstream file before treating this span as complete.
 */
47 zap_getflags(zap_t
*zap
)
51 return (zap_f_phys(zap
)->zap_flags
);
55 zap_hashbits(zap_t
*zap
)
57 if (zap_getflags(zap
) & ZAP_FLAG_HASH64
)
66 if (zap_getflags(zap
) & ZAP_FLAG_HASH64
)
73 zap_hash(zap_name_t
*zn
)
75 zap_t
*zap
= zn
->zn_zap
;
78 if (zap_getflags(zap
) & ZAP_FLAG_PRE_HASHED_KEY
) {
79 ASSERT(zap_getflags(zap
) & ZAP_FLAG_UINT64_KEY
);
80 h
= *(uint64_t *)zn
->zn_key_orig
;
84 ASSERT(zfs_crc64_table
[128] == ZFS_CRC64_POLY
);
86 if (zap_getflags(zap
) & ZAP_FLAG_UINT64_KEY
) {
88 const uint64_t *wp
= zn
->zn_key_norm
;
90 ASSERT(zn
->zn_key_intlen
== 8);
91 for (i
= 0; i
< zn
->zn_key_norm_numints
; wp
++, i
++) {
95 for (j
= 0; j
< zn
->zn_key_intlen
; j
++) {
97 zfs_crc64_table
[(h
^ word
) & 0xFF];
103 const uint8_t *cp
= zn
->zn_key_norm
;
106 * We previously stored the terminating null on
107 * disk, but didn't hash it, so we need to
108 * continue to not hash it. (The
109 * zn_key_*_numints includes the terminating
110 * null for non-binary keys.)
112 len
= zn
->zn_key_norm_numints
- 1;
114 ASSERT(zn
->zn_key_intlen
== 1);
115 for (i
= 0; i
< len
; cp
++, i
++) {
117 zfs_crc64_table
[(h
^ *cp
) & 0xFF];
122 * Don't use all 64 bits, since we need some in the cookie for
123 * the collision differentiator. We MUST use the high bits,
124 * since those are the ones that we first pay attention to when
125 * choosing the bucket.
127 h
&= ~((1ULL << (64 - zap_hashbits(zap
))) - 1);
133 zap_normalize(zap_t
*zap
, const char *name
, char *namenorm
)
135 size_t inlen
, outlen
;
138 ASSERT(!(zap_getflags(zap
) & ZAP_FLAG_UINT64_KEY
));
140 inlen
= strlen(name
) + 1;
141 outlen
= ZAP_MAXNAMELEN
;
144 (void) u8_textprep_str((char *)name
, &inlen
, namenorm
, &outlen
,
145 zap
->zap_normflags
| U8_TEXTPREP_IGNORE_NULL
|
146 U8_TEXTPREP_IGNORE_INVALID
, U8_UNICODE_LATEST
, &err
);
152 zap_match(zap_name_t
*zn
, const char *matchname
)
154 ASSERT(!(zap_getflags(zn
->zn_zap
) & ZAP_FLAG_UINT64_KEY
));
156 if (zn
->zn_matchtype
== MT_FIRST
) {
157 char norm
[ZAP_MAXNAMELEN
];
159 if (zap_normalize(zn
->zn_zap
, matchname
, norm
) != 0)
162 return (strcmp(zn
->zn_key_norm
, norm
) == 0);
164 /* MT_BEST or MT_EXACT */
165 return (strcmp(zn
->zn_key_orig
, matchname
) == 0);
/*
 * Release a zap_name_t previously obtained from zap_name_alloc() or
 * zap_name_alloc_uint64() (both allocate sizeof (zap_name_t) with
 * KM_SLEEP; this is the matching kmem_free).
 * NOTE(review): garbled extraction -- the function's braces (original
 * lines 171/173) are missing from this view.
 */
170 zap_name_free(zap_name_t
*zn
)
172 kmem_free(zn
, sizeof (zap_name_t
));
176 zap_name_alloc(zap_t
*zap
, const char *key
, matchtype_t mt
)
178 zap_name_t
*zn
= kmem_alloc(sizeof (zap_name_t
), KM_SLEEP
);
181 zn
->zn_key_intlen
= sizeof (*key
);
182 zn
->zn_key_orig
= key
;
183 zn
->zn_key_orig_numints
= strlen(zn
->zn_key_orig
) + 1;
184 zn
->zn_matchtype
= mt
;
185 if (zap
->zap_normflags
) {
186 if (zap_normalize(zap
, key
, zn
->zn_normbuf
) != 0) {
190 zn
->zn_key_norm
= zn
->zn_normbuf
;
191 zn
->zn_key_norm_numints
= strlen(zn
->zn_key_norm
) + 1;
193 if (mt
!= MT_EXACT
) {
197 zn
->zn_key_norm
= zn
->zn_key_orig
;
198 zn
->zn_key_norm_numints
= zn
->zn_key_orig_numints
;
201 zn
->zn_hash
= zap_hash(zn
);
206 zap_name_alloc_uint64(zap_t
*zap
, const uint64_t *key
, int numints
)
208 zap_name_t
*zn
= kmem_alloc(sizeof (zap_name_t
), KM_SLEEP
);
210 ASSERT(zap
->zap_normflags
== 0);
212 zn
->zn_key_intlen
= sizeof (*key
);
213 zn
->zn_key_orig
= zn
->zn_key_norm
= key
;
214 zn
->zn_key_orig_numints
= zn
->zn_key_norm_numints
= numints
;
215 zn
->zn_matchtype
= MT_EXACT
;
217 zn
->zn_hash
= zap_hash(zn
);
222 mzap_byteswap(mzap_phys_t
*buf
, size_t size
)
225 buf
->mz_block_type
= BSWAP_64(buf
->mz_block_type
);
226 buf
->mz_salt
= BSWAP_64(buf
->mz_salt
);
227 buf
->mz_normflags
= BSWAP_64(buf
->mz_normflags
);
228 max
= (size
/ MZAP_ENT_LEN
) - 1;
229 for (i
= 0; i
< max
; i
++) {
230 buf
->mz_chunk
[i
].mze_value
=
231 BSWAP_64(buf
->mz_chunk
[i
].mze_value
);
232 buf
->mz_chunk
[i
].mze_cd
=
233 BSWAP_32(buf
->mz_chunk
[i
].mze_cd
);
238 zap_byteswap(void *buf
, size_t size
)
242 block_type
= *(uint64_t *)buf
;
244 if (block_type
== ZBT_MICRO
|| block_type
== BSWAP_64(ZBT_MICRO
)) {
245 /* ASSERT(magic == ZAP_LEAF_MAGIC); */
246 mzap_byteswap(buf
, size
);
248 fzap_byteswap(buf
, size
);
/*
 * AVL comparator for the micro-zap in-core entry tree (zap_m.zap_avl).
 * Orders entries by mze_hash first, then by mze_cd (the collision
 * differentiator) -- the four visible comparisons check hash then cd in
 * both directions.
 * NOTE(review): garbled extraction -- the return statements that follow
 * each comparison (original lines 259, 261, 263, 265) and the final
 * "equal" return are missing from this view; only the conditions remain.
 */
253 mze_compare(const void *arg1
, const void *arg2
)
255 const mzap_ent_t
*mze1
= arg1
;
256 const mzap_ent_t
*mze2
= arg2
;
258 if (mze1
->mze_hash
> mze2
->mze_hash
)
260 if (mze1
->mze_hash
< mze2
->mze_hash
)
262 if (mze1
->mze_cd
> mze2
->mze_cd
)
264 if (mze1
->mze_cd
< mze2
->mze_cd
)
270 mze_insert(zap_t
*zap
, int chunkid
, uint64_t hash
)
274 ASSERT(zap
->zap_ismicro
);
275 ASSERT(RW_WRITE_HELD(&zap
->zap_rwlock
));
277 mze
= kmem_alloc(sizeof (mzap_ent_t
), KM_SLEEP
);
278 mze
->mze_chunkid
= chunkid
;
279 mze
->mze_hash
= hash
;
280 mze
->mze_cd
= MZE_PHYS(zap
, mze
)->mze_cd
;
281 ASSERT(MZE_PHYS(zap
, mze
)->mze_name
[0] != 0);
282 avl_add(&zap
->zap_m
.zap_avl
, mze
);
286 mze_find(zap_name_t
*zn
)
288 mzap_ent_t mze_tofind
;
291 avl_tree_t
*avl
= &zn
->zn_zap
->zap_m
.zap_avl
;
293 ASSERT(zn
->zn_zap
->zap_ismicro
);
294 ASSERT(RW_LOCK_HELD(&zn
->zn_zap
->zap_rwlock
));
296 mze_tofind
.mze_hash
= zn
->zn_hash
;
297 mze_tofind
.mze_cd
= 0;
300 mze
= avl_find(avl
, &mze_tofind
, &idx
);
302 mze
= avl_nearest(avl
, idx
, AVL_AFTER
);
303 for (; mze
&& mze
->mze_hash
== zn
->zn_hash
; mze
= AVL_NEXT(avl
, mze
)) {
304 ASSERT3U(mze
->mze_cd
, ==, MZE_PHYS(zn
->zn_zap
, mze
)->mze_cd
);
305 if (zap_match(zn
, MZE_PHYS(zn
->zn_zap
, mze
)->mze_name
))
308 if (zn
->zn_matchtype
== MT_BEST
) {
309 zn
->zn_matchtype
= MT_FIRST
;
316 mze_find_unused_cd(zap_t
*zap
, uint64_t hash
)
318 mzap_ent_t mze_tofind
;
321 avl_tree_t
*avl
= &zap
->zap_m
.zap_avl
;
324 ASSERT(zap
->zap_ismicro
);
325 ASSERT(RW_LOCK_HELD(&zap
->zap_rwlock
));
327 mze_tofind
.mze_hash
= hash
;
328 mze_tofind
.mze_cd
= 0;
331 for (mze
= avl_find(avl
, &mze_tofind
, &idx
);
332 mze
&& mze
->mze_hash
== hash
; mze
= AVL_NEXT(avl
, mze
)) {
333 if (mze
->mze_cd
!= cd
)
/*
 * Remove one in-core entry from the micro-zap AVL tree and free it.
 * Caller must hold zap_rwlock as writer on a micro zap (both asserted).
 * NOTE(review): garbled extraction -- the function braces (original
 * lines 343/349) are missing from this view.
 */
342 mze_remove(zap_t
*zap
, mzap_ent_t
*mze
)
344 ASSERT(zap
->zap_ismicro
);
345 ASSERT(RW_WRITE_HELD(&zap
->zap_rwlock
));
347 avl_remove(&zap
->zap_m
.zap_avl
, mze
);
348 kmem_free(mze
, sizeof (mzap_ent_t
));
/*
 * Tear down the micro-zap's entire in-core AVL tree: free every node via
 * avl_destroy_nodes() (cookie-driven bulk destruction), then destroy the
 * tree itself.
 * NOTE(review): garbled extraction -- the declaration of the local 'mze'
 * (original line ~354) and the function braces are missing from this view.
 */
352 mze_destroy(zap_t
*zap
)
355 void *avlcookie
= NULL
;
357 while ((mze
= avl_destroy_nodes(&zap
->zap_m
.zap_avl
, &avlcookie
)))
358 kmem_free(mze
, sizeof (mzap_ent_t
));
359 avl_destroy(&zap
->zap_m
.zap_avl
);
363 mzap_open(objset_t
*os
, uint64_t obj
, dmu_buf_t
*db
)
369 ASSERT3U(MZAP_ENT_LEN
, ==, sizeof (mzap_ent_phys_t
));
371 zap
= kmem_zalloc(sizeof (zap_t
), KM_SLEEP
);
372 rw_init(&zap
->zap_rwlock
, NULL
, RW_DEFAULT
, NULL
);
373 rw_enter(&zap
->zap_rwlock
, RW_WRITER
);
374 zap
->zap_objset
= os
;
375 zap
->zap_object
= obj
;
378 if (*(uint64_t *)db
->db_data
!= ZBT_MICRO
) {
379 mutex_init(&zap
->zap_f
.zap_num_entries_mtx
, 0, 0, 0);
380 zap
->zap_f
.zap_block_shift
= highbit64(db
->db_size
) - 1;
382 zap
->zap_ismicro
= TRUE
;
386 * Make sure that zap_ismicro is set before we let others see
387 * it, because zap_lockdir() checks zap_ismicro without the lock
390 dmu_buf_init_user(&zap
->zap_dbu
, zap_evict
, &zap
->zap_dbuf
);
391 winner
= dmu_buf_set_user(db
, &zap
->zap_dbu
);
393 if (winner
!= NULL
) {
394 rw_exit(&zap
->zap_rwlock
);
395 rw_destroy(&zap
->zap_rwlock
);
396 if (!zap
->zap_ismicro
)
397 mutex_destroy(&zap
->zap_f
.zap_num_entries_mtx
);
398 kmem_free(zap
, sizeof (zap_t
));
402 if (zap
->zap_ismicro
) {
403 zap
->zap_salt
= zap_m_phys(zap
)->mz_salt
;
404 zap
->zap_normflags
= zap_m_phys(zap
)->mz_normflags
;
405 zap
->zap_m
.zap_num_chunks
= db
->db_size
/ MZAP_ENT_LEN
- 1;
406 avl_create(&zap
->zap_m
.zap_avl
, mze_compare
,
407 sizeof (mzap_ent_t
), offsetof(mzap_ent_t
, mze_node
));
409 for (i
= 0; i
< zap
->zap_m
.zap_num_chunks
; i
++) {
410 mzap_ent_phys_t
*mze
=
411 &zap_m_phys(zap
)->mz_chunk
[i
];
412 if (mze
->mze_name
[0]) {
415 zap
->zap_m
.zap_num_entries
++;
416 zn
= zap_name_alloc(zap
, mze
->mze_name
,
418 mze_insert(zap
, i
, zn
->zn_hash
);
423 zap
->zap_salt
= zap_f_phys(zap
)->zap_salt
;
424 zap
->zap_normflags
= zap_f_phys(zap
)->zap_normflags
;
426 ASSERT3U(sizeof (struct zap_leaf_header
), ==,
427 2*ZAP_LEAF_CHUNKSIZE
);
430 * The embedded pointer table should not overlap the
433 ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap
, 0), >,
434 &zap_f_phys(zap
)->zap_salt
);
437 * The embedded pointer table should end at the end of
440 ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap
,
441 1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap
)) -
442 (uintptr_t)zap_f_phys(zap
), ==,
443 zap
->zap_dbuf
->db_size
);
445 rw_exit(&zap
->zap_rwlock
);
450 zap_lockdir(objset_t
*os
, uint64_t obj
, dmu_tx_t
*tx
,
451 krw_t lti
, boolean_t fatreader
, boolean_t adding
, zap_t
**zapp
)
453 dmu_object_info_t doi
;
461 err
= dmu_buf_hold(os
, obj
, 0, NULL
, &db
, DMU_READ_NO_PREFETCH
);
465 dmu_object_info_from_db(db
, &doi
);
466 if (DMU_OT_BYTESWAP(doi
.doi_type
) != DMU_BSWAP_ZAP
)
467 return (SET_ERROR(EINVAL
));
469 zap
= dmu_buf_get_user(db
);
471 zap
= mzap_open(os
, obj
, db
);
474 * We're checking zap_ismicro without the lock held, in order to
475 * tell what type of lock we want. Once we have some sort of
476 * lock, see if it really is the right type. In practice this
477 * can only be different if it was upgraded from micro to fat,
478 * and micro wanted WRITER but fat only needs READER.
480 lt
= (!zap
->zap_ismicro
&& fatreader
) ? RW_READER
: lti
;
481 rw_enter(&zap
->zap_rwlock
, lt
);
482 if (lt
!= ((!zap
->zap_ismicro
&& fatreader
) ? RW_READER
: lti
)) {
483 /* it was upgraded, now we only need reader */
484 ASSERT(lt
== RW_WRITER
);
486 (!zap
->zap_ismicro
&& fatreader
) ? RW_READER
: lti
);
487 rw_downgrade(&zap
->zap_rwlock
);
491 zap
->zap_objset
= os
;
494 dmu_buf_will_dirty(db
, tx
);
496 ASSERT3P(zap
->zap_dbuf
, ==, db
);
498 ASSERT(!zap
->zap_ismicro
||
499 zap
->zap_m
.zap_num_entries
<= zap
->zap_m
.zap_num_chunks
);
500 if (zap
->zap_ismicro
&& tx
&& adding
&&
501 zap
->zap_m
.zap_num_entries
== zap
->zap_m
.zap_num_chunks
) {
502 uint64_t newsz
= db
->db_size
+ SPA_MINBLOCKSIZE
;
503 if (newsz
> MZAP_MAX_BLKSZ
) {
504 dprintf("upgrading obj %llu: num_entries=%u\n",
505 obj
, zap
->zap_m
.zap_num_entries
);
507 return (mzap_upgrade(zapp
, tx
, 0));
509 err
= dmu_object_set_blocksize(os
, obj
, newsz
, 0, tx
);
511 zap
->zap_m
.zap_num_chunks
=
512 db
->db_size
/ MZAP_ENT_LEN
- 1;
/*
 * Release a zap held by zap_lockdir(): drop zap_rwlock and release the
 * hold on the zap's dbuf.
 * NOTE(review): garbled extraction -- the function braces (original
 * lines 521/524) are missing from this view.
 */
520 zap_unlockdir(zap_t
*zap
)
522 rw_exit(&zap
->zap_rwlock
);
523 dmu_buf_rele(zap
->zap_dbuf
, NULL
);
527 mzap_upgrade(zap_t
**zapp
, dmu_tx_t
*tx
, zap_flags_t flags
)
534 ASSERT(RW_WRITE_HELD(&zap
->zap_rwlock
));
536 sz
= zap
->zap_dbuf
->db_size
;
537 mzp
= zio_buf_alloc(sz
);
538 bcopy(zap
->zap_dbuf
->db_data
, mzp
, sz
);
539 nchunks
= zap
->zap_m
.zap_num_chunks
;
542 err
= dmu_object_set_blocksize(zap
->zap_objset
, zap
->zap_object
,
543 1ULL << fzap_default_block_shift
, 0, tx
);
545 zio_buf_free(mzp
, sz
);
550 dprintf("upgrading obj=%llu with %u chunks\n",
551 zap
->zap_object
, nchunks
);
552 /* XXX destroy the avl later, so we can use the stored hash value */
555 fzap_upgrade(zap
, tx
, flags
);
557 for (i
= 0; i
< nchunks
; i
++) {
558 mzap_ent_phys_t
*mze
= &mzp
->mz_chunk
[i
];
560 if (mze
->mze_name
[0] == 0)
562 dprintf("adding %s=%llu\n",
563 mze
->mze_name
, mze
->mze_value
);
564 zn
= zap_name_alloc(zap
, mze
->mze_name
, MT_EXACT
);
565 err
= fzap_add_cd(zn
, 8, 1, &mze
->mze_value
, mze
->mze_cd
, tx
);
566 zap
= zn
->zn_zap
; /* fzap_add_cd() may change zap */
571 zio_buf_free(mzp
, sz
);
577 mzap_create_impl(objset_t
*os
, uint64_t obj
, int normflags
, zap_flags_t flags
,
583 VERIFY(0 == dmu_buf_hold(os
, obj
, 0, FTAG
, &db
, DMU_READ_NO_PREFETCH
));
587 dmu_object_info_t doi
;
588 dmu_object_info_from_db(db
, &doi
);
589 ASSERT3U(DMU_OT_BYTESWAP(doi
.doi_type
), ==, DMU_BSWAP_ZAP
);
593 dmu_buf_will_dirty(db
, tx
);
595 zp
->mz_block_type
= ZBT_MICRO
;
596 zp
->mz_salt
= ((uintptr_t)db
^ (uintptr_t)tx
^ (obj
<< 1)) | 1ULL;
597 zp
->mz_normflags
= normflags
;
598 dmu_buf_rele(db
, FTAG
);
602 /* Only fat zap supports flags; upgrade immediately. */
603 VERIFY(0 == zap_lockdir(os
, obj
, tx
, RW_WRITER
,
604 B_FALSE
, B_FALSE
, &zap
));
605 VERIFY3U(0, ==, mzap_upgrade(&zap
, tx
, flags
));
611 zap_create_claim(objset_t
*os
, uint64_t obj
, dmu_object_type_t ot
,
612 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
614 return (zap_create_claim_norm(os
, obj
,
615 0, ot
, bonustype
, bonuslen
, tx
));
619 zap_create_claim_norm(objset_t
*os
, uint64_t obj
, int normflags
,
620 dmu_object_type_t ot
,
621 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
625 err
= dmu_object_claim(os
, obj
, ot
, 0, bonustype
, bonuslen
, tx
);
628 mzap_create_impl(os
, obj
, normflags
, 0, tx
);
633 zap_create(objset_t
*os
, dmu_object_type_t ot
,
634 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
636 return (zap_create_norm(os
, 0, ot
, bonustype
, bonuslen
, tx
));
640 zap_create_norm(objset_t
*os
, int normflags
, dmu_object_type_t ot
,
641 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
643 uint64_t obj
= dmu_object_alloc(os
, ot
, 0, bonustype
, bonuslen
, tx
);
645 mzap_create_impl(os
, obj
, normflags
, 0, tx
);
650 zap_create_flags(objset_t
*os
, int normflags
, zap_flags_t flags
,
651 dmu_object_type_t ot
, int leaf_blockshift
, int indirect_blockshift
,
652 dmu_object_type_t bonustype
, int bonuslen
, dmu_tx_t
*tx
)
654 uint64_t obj
= dmu_object_alloc(os
, ot
, 0, bonustype
, bonuslen
, tx
);
656 ASSERT(leaf_blockshift
>= SPA_MINBLOCKSHIFT
&&
657 leaf_blockshift
<= SPA_MAXBLOCKSHIFT
&&
658 indirect_blockshift
>= SPA_MINBLOCKSHIFT
&&
659 indirect_blockshift
<= SPA_MAXBLOCKSHIFT
);
661 VERIFY(dmu_object_set_blocksize(os
, obj
,
662 1ULL << leaf_blockshift
, indirect_blockshift
, tx
) == 0);
664 mzap_create_impl(os
, obj
, normflags
, flags
, tx
);
669 zap_destroy(objset_t
*os
, uint64_t zapobj
, dmu_tx_t
*tx
)
672 * dmu_object_free will free the object number and free the
673 * data. Freeing the data will cause our pageout function to be
674 * called, which will destroy our data (zap_leaf_t's and zap_t).
677 return (dmu_object_free(os
, zapobj
, tx
));
685 rw_destroy(&zap
->zap_rwlock
);
687 if (zap
->zap_ismicro
)
690 mutex_destroy(&zap
->zap_f
.zap_num_entries_mtx
);
692 kmem_free(zap
, sizeof (zap_t
));
696 zap_count(objset_t
*os
, uint64_t zapobj
, uint64_t *count
)
701 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
704 if (!zap
->zap_ismicro
) {
705 err
= fzap_count(zap
, count
);
707 *count
= zap
->zap_m
.zap_num_entries
;
714 * zn may be NULL; if not specified, it will be computed if needed.
715 * See also the comment above zap_entry_normalization_conflict().
718 mzap_normalization_conflict(zap_t
*zap
, zap_name_t
*zn
, mzap_ent_t
*mze
)
721 int direction
= AVL_BEFORE
;
722 boolean_t allocdzn
= B_FALSE
;
724 if (zap
->zap_normflags
== 0)
728 for (other
= avl_walk(&zap
->zap_m
.zap_avl
, mze
, direction
);
729 other
&& other
->mze_hash
== mze
->mze_hash
;
730 other
= avl_walk(&zap
->zap_m
.zap_avl
, other
, direction
)) {
733 zn
= zap_name_alloc(zap
, MZE_PHYS(zap
, mze
)->mze_name
,
737 if (zap_match(zn
, MZE_PHYS(zap
, other
)->mze_name
)) {
744 if (direction
== AVL_BEFORE
) {
745 direction
= AVL_AFTER
;
755 * Routines for manipulating attributes.
759 zap_lookup(objset_t
*os
, uint64_t zapobj
, const char *name
,
760 uint64_t integer_size
, uint64_t num_integers
, void *buf
)
762 return (zap_lookup_norm(os
, zapobj
, name
, integer_size
,
763 num_integers
, buf
, MT_EXACT
, NULL
, 0, NULL
));
767 zap_lookup_norm(objset_t
*os
, uint64_t zapobj
, const char *name
,
768 uint64_t integer_size
, uint64_t num_integers
, void *buf
,
769 matchtype_t mt
, char *realname
, int rn_len
,
777 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
780 zn
= zap_name_alloc(zap
, name
, mt
);
783 return (SET_ERROR(ENOTSUP
));
786 if (!zap
->zap_ismicro
) {
787 err
= fzap_lookup(zn
, integer_size
, num_integers
, buf
,
788 realname
, rn_len
, ncp
);
792 err
= SET_ERROR(ENOENT
);
794 if (num_integers
< 1) {
795 err
= SET_ERROR(EOVERFLOW
);
796 } else if (integer_size
!= 8) {
797 err
= SET_ERROR(EINVAL
);
800 MZE_PHYS(zap
, mze
)->mze_value
;
801 (void) strlcpy(realname
,
802 MZE_PHYS(zap
, mze
)->mze_name
, rn_len
);
804 *ncp
= mzap_normalization_conflict(zap
,
816 zap_prefetch_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
823 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
826 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
829 return (SET_ERROR(ENOTSUP
));
839 zap_lookup_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
840 int key_numints
, uint64_t integer_size
, uint64_t num_integers
, void *buf
)
846 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
849 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
852 return (SET_ERROR(ENOTSUP
));
855 err
= fzap_lookup(zn
, integer_size
, num_integers
, buf
,
863 zap_contains(objset_t
*os
, uint64_t zapobj
, const char *name
)
865 int err
= zap_lookup_norm(os
, zapobj
, name
, 0,
866 0, NULL
, MT_EXACT
, NULL
, 0, NULL
);
867 if (err
== EOVERFLOW
|| err
== EINVAL
)
868 err
= 0; /* found, but skipped reading the value */
873 zap_length(objset_t
*os
, uint64_t zapobj
, const char *name
,
874 uint64_t *integer_size
, uint64_t *num_integers
)
881 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
884 zn
= zap_name_alloc(zap
, name
, MT_EXACT
);
887 return (SET_ERROR(ENOTSUP
));
889 if (!zap
->zap_ismicro
) {
890 err
= fzap_length(zn
, integer_size
, num_integers
);
894 err
= SET_ERROR(ENOENT
);
908 zap_length_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
909 int key_numints
, uint64_t *integer_size
, uint64_t *num_integers
)
915 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
918 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
921 return (SET_ERROR(ENOTSUP
));
923 err
= fzap_length(zn
, integer_size
, num_integers
);
930 mzap_addent(zap_name_t
*zn
, uint64_t value
)
933 zap_t
*zap
= zn
->zn_zap
;
934 int start
= zap
->zap_m
.zap_alloc_next
;
937 ASSERT(RW_WRITE_HELD(&zap
->zap_rwlock
));
940 for (i
= 0; i
< zap
->zap_m
.zap_num_chunks
; i
++) {
941 ASSERTV(mzap_ent_phys_t
*mze
);
942 ASSERT(mze
= &zap_m_phys(zap
)->mz_chunk
[i
]);
943 ASSERT(strcmp(zn
->zn_key_orig
, mze
->mze_name
) != 0);
947 cd
= mze_find_unused_cd(zap
, zn
->zn_hash
);
948 /* given the limited size of the microzap, this can't happen */
949 ASSERT(cd
< zap_maxcd(zap
));
952 for (i
= start
; i
< zap
->zap_m
.zap_num_chunks
; i
++) {
953 mzap_ent_phys_t
*mze
= &zap_m_phys(zap
)->mz_chunk
[i
];
954 if (mze
->mze_name
[0] == 0) {
955 mze
->mze_value
= value
;
957 (void) strcpy(mze
->mze_name
, zn
->zn_key_orig
);
958 zap
->zap_m
.zap_num_entries
++;
959 zap
->zap_m
.zap_alloc_next
= i
+1;
960 if (zap
->zap_m
.zap_alloc_next
==
961 zap
->zap_m
.zap_num_chunks
)
962 zap
->zap_m
.zap_alloc_next
= 0;
963 mze_insert(zap
, i
, zn
->zn_hash
);
971 cmn_err(CE_PANIC
, "out of entries!");
975 zap_add(objset_t
*os
, uint64_t zapobj
, const char *key
,
976 int integer_size
, uint64_t num_integers
,
977 const void *val
, dmu_tx_t
*tx
)
982 const uint64_t *intval
= val
;
985 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, TRUE
, &zap
);
988 zn
= zap_name_alloc(zap
, key
, MT_EXACT
);
991 return (SET_ERROR(ENOTSUP
));
993 if (!zap
->zap_ismicro
) {
994 err
= fzap_add(zn
, integer_size
, num_integers
, val
, tx
);
995 zap
= zn
->zn_zap
; /* fzap_add() may change zap */
996 } else if (integer_size
!= 8 || num_integers
!= 1 ||
997 strlen(key
) >= MZAP_NAME_LEN
) {
998 err
= mzap_upgrade(&zn
->zn_zap
, tx
, 0);
1000 err
= fzap_add(zn
, integer_size
, num_integers
, val
, tx
);
1001 zap
= zn
->zn_zap
; /* fzap_add() may change zap */
1005 err
= SET_ERROR(EEXIST
);
1007 mzap_addent(zn
, *intval
);
1010 ASSERT(zap
== zn
->zn_zap
);
1012 if (zap
!= NULL
) /* may be NULL if fzap_add() failed */
1018 zap_add_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
1019 int key_numints
, int integer_size
, uint64_t num_integers
,
1020 const void *val
, dmu_tx_t
*tx
)
1026 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, TRUE
, &zap
);
1029 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
1032 return (SET_ERROR(ENOTSUP
));
1034 err
= fzap_add(zn
, integer_size
, num_integers
, val
, tx
);
1035 zap
= zn
->zn_zap
; /* fzap_add() may change zap */
1037 if (zap
!= NULL
) /* may be NULL if fzap_add() failed */
1043 zap_update(objset_t
*os
, uint64_t zapobj
, const char *name
,
1044 int integer_size
, uint64_t num_integers
, const void *val
, dmu_tx_t
*tx
)
1048 const uint64_t *intval
= val
;
1056 * If there is an old value, it shouldn't change across the
1057 * lockdir (eg, due to bprewrite's xlation).
1059 if (integer_size
== 8 && num_integers
== 1)
1060 (void) zap_lookup(os
, zapobj
, name
, 8, 1, &oldval
);
1063 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, TRUE
, &zap
);
1066 zn
= zap_name_alloc(zap
, name
, MT_EXACT
);
1069 return (SET_ERROR(ENOTSUP
));
1071 if (!zap
->zap_ismicro
) {
1072 err
= fzap_update(zn
, integer_size
, num_integers
, val
, tx
);
1073 zap
= zn
->zn_zap
; /* fzap_update() may change zap */
1074 } else if (integer_size
!= 8 || num_integers
!= 1 ||
1075 strlen(name
) >= MZAP_NAME_LEN
) {
1076 dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
1077 zapobj
, integer_size
, num_integers
, name
);
1078 err
= mzap_upgrade(&zn
->zn_zap
, tx
, 0);
1080 err
= fzap_update(zn
, integer_size
, num_integers
,
1082 zap
= zn
->zn_zap
; /* fzap_update() may change zap */
1086 ASSERT3U(MZE_PHYS(zap
, mze
)->mze_value
, ==, oldval
);
1087 MZE_PHYS(zap
, mze
)->mze_value
= *intval
;
1089 mzap_addent(zn
, *intval
);
1092 ASSERT(zap
== zn
->zn_zap
);
1094 if (zap
!= NULL
) /* may be NULL if fzap_upgrade() failed */
1100 zap_update_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
1102 int integer_size
, uint64_t num_integers
, const void *val
, dmu_tx_t
*tx
)
1108 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, TRUE
, &zap
);
1111 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
1114 return (SET_ERROR(ENOTSUP
));
1116 err
= fzap_update(zn
, integer_size
, num_integers
, val
, tx
);
1117 zap
= zn
->zn_zap
; /* fzap_update() may change zap */
1119 if (zap
!= NULL
) /* may be NULL if fzap_upgrade() failed */
1125 zap_remove(objset_t
*os
, uint64_t zapobj
, const char *name
, dmu_tx_t
*tx
)
1127 return (zap_remove_norm(os
, zapobj
, name
, MT_EXACT
, tx
));
1131 zap_remove_norm(objset_t
*os
, uint64_t zapobj
, const char *name
,
1132 matchtype_t mt
, dmu_tx_t
*tx
)
1139 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, FALSE
, &zap
);
1142 zn
= zap_name_alloc(zap
, name
, mt
);
1145 return (SET_ERROR(ENOTSUP
));
1147 if (!zap
->zap_ismicro
) {
1148 err
= fzap_remove(zn
, tx
);
1152 err
= SET_ERROR(ENOENT
);
1154 zap
->zap_m
.zap_num_entries
--;
1155 bzero(&zap_m_phys(zap
)->mz_chunk
[mze
->mze_chunkid
],
1156 sizeof (mzap_ent_phys_t
));
1157 mze_remove(zap
, mze
);
1166 zap_remove_uint64(objset_t
*os
, uint64_t zapobj
, const uint64_t *key
,
1167 int key_numints
, dmu_tx_t
*tx
)
1173 err
= zap_lockdir(os
, zapobj
, tx
, RW_WRITER
, TRUE
, FALSE
, &zap
);
1176 zn
= zap_name_alloc_uint64(zap
, key
, key_numints
);
1179 return (SET_ERROR(ENOTSUP
));
1181 err
= fzap_remove(zn
, tx
);
1188 * Routines for iterating over the attributes.
1192 zap_cursor_init_serialized(zap_cursor_t
*zc
, objset_t
*os
, uint64_t zapobj
,
1193 uint64_t serialized
)
1198 zc
->zc_zapobj
= zapobj
;
1199 zc
->zc_serialized
= serialized
;
1205 zap_cursor_init(zap_cursor_t
*zc
, objset_t
*os
, uint64_t zapobj
)
1207 zap_cursor_init_serialized(zc
, os
, zapobj
, 0);
1211 zap_cursor_fini(zap_cursor_t
*zc
)
1214 rw_enter(&zc
->zc_zap
->zap_rwlock
, RW_READER
);
1215 zap_unlockdir(zc
->zc_zap
);
1219 rw_enter(&zc
->zc_leaf
->l_rwlock
, RW_READER
);
1220 zap_put_leaf(zc
->zc_leaf
);
1223 zc
->zc_objset
= NULL
;
1227 zap_cursor_serialize(zap_cursor_t
*zc
)
1229 if (zc
->zc_hash
== -1ULL)
1231 if (zc
->zc_zap
== NULL
)
1232 return (zc
->zc_serialized
);
1233 ASSERT((zc
->zc_hash
& zap_maxcd(zc
->zc_zap
)) == 0);
1234 ASSERT(zc
->zc_cd
< zap_maxcd(zc
->zc_zap
));
1237 * We want to keep the high 32 bits of the cursor zero if we can, so
1238 * that 32-bit programs can access this. So usually use a small
1239 * (28-bit) hash value so we can fit 4 bits of cd into the low 32-bits
1242 * [ collision differentiator | zap_hashbits()-bit hash value ]
1244 return ((zc
->zc_hash
>> (64 - zap_hashbits(zc
->zc_zap
))) |
1245 ((uint64_t)zc
->zc_cd
<< zap_hashbits(zc
->zc_zap
)));
1249 zap_cursor_retrieve(zap_cursor_t
*zc
, zap_attribute_t
*za
)
1253 mzap_ent_t mze_tofind
;
1256 if (zc
->zc_hash
== -1ULL)
1257 return (SET_ERROR(ENOENT
));
1259 if (zc
->zc_zap
== NULL
) {
1261 err
= zap_lockdir(zc
->zc_objset
, zc
->zc_zapobj
, NULL
,
1262 RW_READER
, TRUE
, FALSE
, &zc
->zc_zap
);
1267 * To support zap_cursor_init_serialized, advance, retrieve,
1268 * we must add to the existing zc_cd, which may already
1269 * be 1 due to the zap_cursor_advance.
1271 ASSERT(zc
->zc_hash
== 0);
1272 hb
= zap_hashbits(zc
->zc_zap
);
1273 zc
->zc_hash
= zc
->zc_serialized
<< (64 - hb
);
1274 zc
->zc_cd
+= zc
->zc_serialized
>> hb
;
1275 if (zc
->zc_cd
>= zap_maxcd(zc
->zc_zap
)) /* corrupt serialized */
1278 rw_enter(&zc
->zc_zap
->zap_rwlock
, RW_READER
);
1280 if (!zc
->zc_zap
->zap_ismicro
) {
1281 err
= fzap_cursor_retrieve(zc
->zc_zap
, zc
, za
);
1283 mze_tofind
.mze_hash
= zc
->zc_hash
;
1284 mze_tofind
.mze_cd
= zc
->zc_cd
;
1286 mze
= avl_find(&zc
->zc_zap
->zap_m
.zap_avl
, &mze_tofind
, &idx
);
1288 mze
= avl_nearest(&zc
->zc_zap
->zap_m
.zap_avl
,
1292 mzap_ent_phys_t
*mzep
= MZE_PHYS(zc
->zc_zap
, mze
);
1293 ASSERT3U(mze
->mze_cd
, ==, mzep
->mze_cd
);
1294 za
->za_normalization_conflict
=
1295 mzap_normalization_conflict(zc
->zc_zap
, NULL
, mze
);
1296 za
->za_integer_length
= 8;
1297 za
->za_num_integers
= 1;
1298 za
->za_first_integer
= mzep
->mze_value
;
1299 (void) strcpy(za
->za_name
, mzep
->mze_name
);
1300 zc
->zc_hash
= mze
->mze_hash
;
1301 zc
->zc_cd
= mze
->mze_cd
;
1304 zc
->zc_hash
= -1ULL;
1305 err
= SET_ERROR(ENOENT
);
1308 rw_exit(&zc
->zc_zap
->zap_rwlock
);
1313 zap_cursor_advance(zap_cursor_t
*zc
)
1315 if (zc
->zc_hash
== -1ULL)
1321 zap_cursor_move_to_key(zap_cursor_t
*zc
, const char *name
, matchtype_t mt
)
1327 if (zc
->zc_zap
== NULL
) {
1328 err
= zap_lockdir(zc
->zc_objset
, zc
->zc_zapobj
, NULL
,
1329 RW_READER
, TRUE
, FALSE
, &zc
->zc_zap
);
1333 rw_enter(&zc
->zc_zap
->zap_rwlock
, RW_READER
);
1336 zn
= zap_name_alloc(zc
->zc_zap
, name
, mt
);
1338 rw_exit(&zc
->zc_zap
->zap_rwlock
);
1339 return (SET_ERROR(ENOTSUP
));
1342 if (!zc
->zc_zap
->zap_ismicro
) {
1343 err
= fzap_cursor_move_to_key(zc
, zn
);
1347 err
= SET_ERROR(ENOENT
);
1350 zc
->zc_hash
= mze
->mze_hash
;
1351 zc
->zc_cd
= mze
->mze_cd
;
1356 rw_exit(&zc
->zc_zap
->zap_rwlock
);
1361 zap_get_stats(objset_t
*os
, uint64_t zapobj
, zap_stats_t
*zs
)
1366 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
1370 bzero(zs
, sizeof (zap_stats_t
));
1372 if (zap
->zap_ismicro
) {
1373 zs
->zs_blocksize
= zap
->zap_dbuf
->db_size
;
1374 zs
->zs_num_entries
= zap
->zap_m
.zap_num_entries
;
1375 zs
->zs_num_blocks
= 1;
1377 fzap_get_stats(zap
, zs
);
1384 zap_count_write(objset_t
*os
, uint64_t zapobj
, const char *name
, int add
,
1385 uint64_t *towrite
, uint64_t *tooverwrite
)
1392 * Since, we don't have a name, we cannot figure out which blocks will
1393 * be affected in this operation. So, account for the worst case :
1394 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
1395 * - 4 new blocks written if adding:
1396 * - 2 blocks for possibly split leaves,
1397 * - 2 grown ptrtbl blocks
1399 * This also accommodates the case where an add operation to a fairly
1400 * large microzap results in a promotion to fatzap.
1403 *towrite
+= (3 + (add
? 4 : 0)) * SPA_MAXBLOCKSIZE
;
1408 * We lock the zap with adding == FALSE. Because, if we pass
1409 * the actual value of add, it could trigger a mzap_upgrade().
1410 * At present we are just evaluating the possibility of this operation
1411 * and hence we do not want to trigger an upgrade.
1413 err
= zap_lockdir(os
, zapobj
, NULL
, RW_READER
, TRUE
, FALSE
, &zap
);
1417 if (!zap
->zap_ismicro
) {
1418 zap_name_t
*zn
= zap_name_alloc(zap
, name
, MT_EXACT
);
1420 err
= fzap_count_write(zn
, add
, towrite
,
1425 * We treat this case as similar to (name == NULL)
1427 *towrite
+= (3 + (add
? 4 : 0)) * SPA_MAXBLOCKSIZE
;
1431 * We are here if (name != NULL) and this is a micro-zap.
1432 * We account for the header block depending on whether it
1435 * In case of an add-operation it is hard to find out
1436 * if this add will promote this microzap to fatzap.
1437 * Hence, we consider the worst case and account for the
1438 * blocks assuming this microzap would be promoted to a
1441 * 1 block overwritten : header block
1442 * 4 new blocks written : 2 new split leaf, 2 grown
1445 if (dmu_buf_freeable(zap
->zap_dbuf
))
1446 *tooverwrite
+= SPA_MAXBLOCKSIZE
;
1448 *towrite
+= SPA_MAXBLOCKSIZE
;
1451 *towrite
+= 4 * SPA_MAXBLOCKSIZE
;
1459 #if defined(_KERNEL) && defined(HAVE_SPL)
1460 EXPORT_SYMBOL(zap_create
);
1461 EXPORT_SYMBOL(zap_create_norm
);
1462 EXPORT_SYMBOL(zap_create_flags
);
1463 EXPORT_SYMBOL(zap_create_claim
);
1464 EXPORT_SYMBOL(zap_create_claim_norm
);
1465 EXPORT_SYMBOL(zap_destroy
);
1466 EXPORT_SYMBOL(zap_lookup
);
1467 EXPORT_SYMBOL(zap_lookup_norm
);
1468 EXPORT_SYMBOL(zap_lookup_uint64
);
1469 EXPORT_SYMBOL(zap_contains
);
1470 EXPORT_SYMBOL(zap_prefetch_uint64
);
1471 EXPORT_SYMBOL(zap_count_write
);
1472 EXPORT_SYMBOL(zap_add
);
1473 EXPORT_SYMBOL(zap_add_uint64
);
1474 EXPORT_SYMBOL(zap_update
);
1475 EXPORT_SYMBOL(zap_update_uint64
);
1476 EXPORT_SYMBOL(zap_length
);
1477 EXPORT_SYMBOL(zap_length_uint64
);
1478 EXPORT_SYMBOL(zap_remove
);
1479 EXPORT_SYMBOL(zap_remove_norm
);
1480 EXPORT_SYMBOL(zap_remove_uint64
);
1481 EXPORT_SYMBOL(zap_count
);
1482 EXPORT_SYMBOL(zap_value_search
);
1483 EXPORT_SYMBOL(zap_join
);
1484 EXPORT_SYMBOL(zap_join_increment
);
1485 EXPORT_SYMBOL(zap_add_int
);
1486 EXPORT_SYMBOL(zap_remove_int
);
1487 EXPORT_SYMBOL(zap_lookup_int
);
1488 EXPORT_SYMBOL(zap_increment_int
);
1489 EXPORT_SYMBOL(zap_add_int_key
);
1490 EXPORT_SYMBOL(zap_lookup_int_key
);
1491 EXPORT_SYMBOL(zap_increment
);
1492 EXPORT_SYMBOL(zap_cursor_init
);
1493 EXPORT_SYMBOL(zap_cursor_fini
);
1494 EXPORT_SYMBOL(zap_cursor_retrieve
);
1495 EXPORT_SYMBOL(zap_cursor_advance
);
1496 EXPORT_SYMBOL(zap_cursor_serialize
);
1497 EXPORT_SYMBOL(zap_cursor_move_to_key
);
1498 EXPORT_SYMBOL(zap_cursor_init_serialized
);
1499 EXPORT_SYMBOL(zap_get_stats
);