1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
24 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
25 */
26
27 #include <sys/zio.h>
28 #include <sys/spa.h>
29 #include <sys/dmu.h>
30 #include <sys/zfs_context.h>
31 #include <sys/zap.h>
32 #include <sys/refcount.h>
33 #include <sys/zap_impl.h>
34 #include <sys/zap_leaf.h>
35 #include <sys/avl.h>
36 #include <sys/arc.h>
37 #include <sys/dmu_objset.h>
38
39 #ifdef _KERNEL
40 #include <sys/sunddi.h>
41 #endif
42
43 extern inline mzap_phys_t *zap_m_phys(zap_t *zap);
44
45 static int mzap_upgrade(zap_t **zapp,
46 void *tag, dmu_tx_t *tx, zap_flags_t flags);
47
48 uint64_t
49 zap_getflags(zap_t *zap)
50 {
51 if (zap->zap_ismicro)
52 return (0);
53 return (zap_f_phys(zap)->zap_flags);
54 }
55
56 int
57 zap_hashbits(zap_t *zap)
58 {
59 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
60 return (48);
61 else
62 return (28);
63 }
64
65 uint32_t
66 zap_maxcd(zap_t *zap)
67 {
68 if (zap_getflags(zap) & ZAP_FLAG_HASH64)
69 return ((1<<16)-1);
70 else
71 return (-1U);
72 }
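/*
 * Note (summarizing the two routines above): by default a ZAP uses a
 * 28-bit hash and allows the full 32-bit range for the collision
 * differentiator (cd); with ZAP_FLAG_HASH64 the hash widens to 48 bits
 * and the cd is limited to 16 bits.
 */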
73
74 static uint64_t
75 zap_hash(zap_name_t *zn)
76 {
77 zap_t *zap = zn->zn_zap;
78 uint64_t h = 0;
79
80 if (zap_getflags(zap) & ZAP_FLAG_PRE_HASHED_KEY) {
81 ASSERT(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY);
82 h = *(uint64_t *)zn->zn_key_orig;
83 } else {
84 h = zap->zap_salt;
85 ASSERT(h != 0);
86 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
87
88 if (zap_getflags(zap) & ZAP_FLAG_UINT64_KEY) {
89 int i;
90 const uint64_t *wp = zn->zn_key_norm;
91
92 ASSERT(zn->zn_key_intlen == 8);
93 for (i = 0; i < zn->zn_key_norm_numints; wp++, i++) {
94 int j;
95 uint64_t word = *wp;
96
97 for (j = 0; j < zn->zn_key_intlen; j++) {
98 h = (h >> 8) ^
99 zfs_crc64_table[(h ^ word) & 0xFF];
100 word >>= NBBY;
101 }
102 }
103 } else {
104 int i, len;
105 const uint8_t *cp = zn->zn_key_norm;
106
107 /*
108 * We previously stored the terminating null on
109 * disk, but didn't hash it, so we need to
110 * continue to not hash it. (The
111 * zn_key_*_numints includes the terminating
112 * null for non-binary keys.)
113 */
114 len = zn->zn_key_norm_numints - 1;
115
116 ASSERT(zn->zn_key_intlen == 1);
117 for (i = 0; i < len; cp++, i++) {
118 h = (h >> 8) ^
119 zfs_crc64_table[(h ^ *cp) & 0xFF];
120 }
121 }
122 }
123 /*
124 * Don't use all 64 bits, since we need some in the cookie for
125 * the collision differentiator. We MUST use the high bits,
126 * since those are the ones that we first pay attention to when
127 * choosing the bucket.
128 */
129 h &= ~((1ULL << (64 - zap_hashbits(zap))) - 1);
130
131 return (h);
132 }
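/*
 * Worked example of the masking above (for illustration, assuming the
 * default 28-bit hash): zap_hashbits() returns 28, so the mask clears
 * the low 64 - 28 = 36 bits and only the top 28 bits of the CRC-64
 * survive.  Those high bits choose the bucket; the cleared low bits
 * leave room for the collision differentiator in the cursor cookie.
 */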
133
134 static int
135 zap_normalize(zap_t *zap, const char *name, char *namenorm)
136 {
137 size_t inlen, outlen;
138 int err;
139
140 ASSERT(!(zap_getflags(zap) & ZAP_FLAG_UINT64_KEY));
141
142 inlen = strlen(name) + 1;
143 outlen = ZAP_MAXNAMELEN;
144
145 err = 0;
146 (void) u8_textprep_str((char *)name, &inlen, namenorm, &outlen,
147 zap->zap_normflags | U8_TEXTPREP_IGNORE_NULL |
148 U8_TEXTPREP_IGNORE_INVALID, U8_UNICODE_LATEST, &err);
149
150 return (err);
151 }
152
153 boolean_t
154 zap_match(zap_name_t *zn, const char *matchname)
155 {
156 ASSERT(!(zap_getflags(zn->zn_zap) & ZAP_FLAG_UINT64_KEY));
157
158 if (zn->zn_matchtype == MT_FIRST) {
159 char norm[ZAP_MAXNAMELEN];
160
161 if (zap_normalize(zn->zn_zap, matchname, norm) != 0)
162 return (B_FALSE);
163
164 return (strcmp(zn->zn_key_norm, norm) == 0);
165 } else {
166 /* MT_BEST or MT_EXACT */
167 return (strcmp(zn->zn_key_orig, matchname) == 0);
168 }
169 }
170
171 void
172 zap_name_free(zap_name_t *zn)
173 {
174 kmem_free(zn, sizeof (zap_name_t));
175 }
176
177 zap_name_t *
178 zap_name_alloc(zap_t *zap, const char *key, matchtype_t mt)
179 {
180 zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
181
182 zn->zn_zap = zap;
183 zn->zn_key_intlen = sizeof (*key);
184 zn->zn_key_orig = key;
185 zn->zn_key_orig_numints = strlen(zn->zn_key_orig) + 1;
186 zn->zn_matchtype = mt;
187 if (zap->zap_normflags) {
188 if (zap_normalize(zap, key, zn->zn_normbuf) != 0) {
189 zap_name_free(zn);
190 return (NULL);
191 }
192 zn->zn_key_norm = zn->zn_normbuf;
193 zn->zn_key_norm_numints = strlen(zn->zn_key_norm) + 1;
194 } else {
195 if (mt != MT_EXACT) {
196 zap_name_free(zn);
197 return (NULL);
198 }
199 zn->zn_key_norm = zn->zn_key_orig;
200 zn->zn_key_norm_numints = zn->zn_key_orig_numints;
201 }
202
203 zn->zn_hash = zap_hash(zn);
204 return (zn);
205 }
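/*
 * Note: a NULL return from zap_name_alloc() means the name cannot be
 * used with this ZAP -- either normalization failed, or a non-MT_EXACT
 * match was requested on a ZAP created without normalization flags.
 * Callers in this file generally map that to ENOTSUP.
 */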
206
207 zap_name_t *
208 zap_name_alloc_uint64(zap_t *zap, const uint64_t *key, int numints)
209 {
210 zap_name_t *zn = kmem_alloc(sizeof (zap_name_t), KM_SLEEP);
211
212 ASSERT(zap->zap_normflags == 0);
213 zn->zn_zap = zap;
214 zn->zn_key_intlen = sizeof (*key);
215 zn->zn_key_orig = zn->zn_key_norm = key;
216 zn->zn_key_orig_numints = zn->zn_key_norm_numints = numints;
217 zn->zn_matchtype = MT_EXACT;
218
219 zn->zn_hash = zap_hash(zn);
220 return (zn);
221 }
222
223 static void
224 mzap_byteswap(mzap_phys_t *buf, size_t size)
225 {
226 int i, max;
227 buf->mz_block_type = BSWAP_64(buf->mz_block_type);
228 buf->mz_salt = BSWAP_64(buf->mz_salt);
229 buf->mz_normflags = BSWAP_64(buf->mz_normflags);
230 max = (size / MZAP_ENT_LEN) - 1;
231 for (i = 0; i < max; i++) {
232 buf->mz_chunk[i].mze_value =
233 BSWAP_64(buf->mz_chunk[i].mze_value);
234 buf->mz_chunk[i].mze_cd =
235 BSWAP_32(buf->mz_chunk[i].mze_cd);
236 }
237 }
238
239 void
240 zap_byteswap(void *buf, size_t size)
241 {
242 uint64_t block_type;
243
244 block_type = *(uint64_t *)buf;
245
246 if (block_type == ZBT_MICRO || block_type == BSWAP_64(ZBT_MICRO)) {
247 /* ASSERT(magic == ZAP_LEAF_MAGIC); */
248 mzap_byteswap(buf, size);
249 } else {
250 fzap_byteswap(buf, size);
251 }
252 }
253
254 static int
255 mze_compare(const void *arg1, const void *arg2)
256 {
257 const mzap_ent_t *mze1 = arg1;
258 const mzap_ent_t *mze2 = arg2;
259
260 int cmp = AVL_CMP(mze1->mze_hash, mze2->mze_hash);
261 if (likely(cmp))
262 return (cmp);
263
264 return (AVL_CMP(mze1->mze_cd, mze2->mze_cd));
265 }
266
267 static void
268 mze_insert(zap_t *zap, int chunkid, uint64_t hash)
269 {
270 mzap_ent_t *mze;
271
272 ASSERT(zap->zap_ismicro);
273 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
274
275 mze = kmem_alloc(sizeof (mzap_ent_t), KM_SLEEP);
276 mze->mze_chunkid = chunkid;
277 mze->mze_hash = hash;
278 mze->mze_cd = MZE_PHYS(zap, mze)->mze_cd;
279 ASSERT(MZE_PHYS(zap, mze)->mze_name[0] != 0);
280 avl_add(&zap->zap_m.zap_avl, mze);
281 }
282
283 static mzap_ent_t *
284 mze_find(zap_name_t *zn)
285 {
286 mzap_ent_t mze_tofind;
287 mzap_ent_t *mze;
288 avl_index_t idx;
289 avl_tree_t *avl = &zn->zn_zap->zap_m.zap_avl;
290
291 ASSERT(zn->zn_zap->zap_ismicro);
292 ASSERT(RW_LOCK_HELD(&zn->zn_zap->zap_rwlock));
293
294 mze_tofind.mze_hash = zn->zn_hash;
295 mze_tofind.mze_cd = 0;
296
297 again:
298 mze = avl_find(avl, &mze_tofind, &idx);
299 if (mze == NULL)
300 mze = avl_nearest(avl, idx, AVL_AFTER);
301 for (; mze && mze->mze_hash == zn->zn_hash; mze = AVL_NEXT(avl, mze)) {
302 ASSERT3U(mze->mze_cd, ==, MZE_PHYS(zn->zn_zap, mze)->mze_cd);
303 if (zap_match(zn, MZE_PHYS(zn->zn_zap, mze)->mze_name))
304 return (mze);
305 }
306 if (zn->zn_matchtype == MT_BEST) {
307 zn->zn_matchtype = MT_FIRST;
308 goto again;
309 }
310 return (NULL);
311 }
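/*
 * Note: for MT_BEST the loop above first looks for an exact match on the
 * original name; if none is found the matchtype is downgraded to
 * MT_FIRST and the scan repeats, accepting the first entry whose
 * normalized name matches at this hash value.
 */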
312
313 static uint32_t
314 mze_find_unused_cd(zap_t *zap, uint64_t hash)
315 {
316 mzap_ent_t mze_tofind;
317 mzap_ent_t *mze;
318 avl_index_t idx;
319 avl_tree_t *avl = &zap->zap_m.zap_avl;
320 uint32_t cd;
321
322 ASSERT(zap->zap_ismicro);
323 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
324
325 mze_tofind.mze_hash = hash;
326 mze_tofind.mze_cd = 0;
327
328 cd = 0;
329 for (mze = avl_find(avl, &mze_tofind, &idx);
330 mze && mze->mze_hash == hash; mze = AVL_NEXT(avl, mze)) {
331 if (mze->mze_cd != cd)
332 break;
333 cd++;
334 }
335
336 return (cd);
337 }
338
339 static void
340 mze_remove(zap_t *zap, mzap_ent_t *mze)
341 {
342 ASSERT(zap->zap_ismicro);
343 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
344
345 avl_remove(&zap->zap_m.zap_avl, mze);
346 kmem_free(mze, sizeof (mzap_ent_t));
347 }
348
349 static void
350 mze_destroy(zap_t *zap)
351 {
352 mzap_ent_t *mze;
353 void *avlcookie = NULL;
354
355 while ((mze = avl_destroy_nodes(&zap->zap_m.zap_avl, &avlcookie)))
356 kmem_free(mze, sizeof (mzap_ent_t));
357 avl_destroy(&zap->zap_m.zap_avl);
358 }
359
360 static zap_t *
361 mzap_open(objset_t *os, uint64_t obj, dmu_buf_t *db)
362 {
363 zap_t *winner;
364 zap_t *zap;
365 int i;
366 uint64_t *zap_hdr = (uint64_t *)db->db_data;
367 uint64_t zap_block_type = zap_hdr[0];
368 uint64_t zap_magic = zap_hdr[1];
369
370 ASSERT3U(MZAP_ENT_LEN, ==, sizeof (mzap_ent_phys_t));
371
372 zap = kmem_zalloc(sizeof (zap_t), KM_SLEEP);
373 rw_init(&zap->zap_rwlock, NULL, RW_DEFAULT, NULL);
374 rw_enter(&zap->zap_rwlock, RW_WRITER);
375 zap->zap_objset = os;
376 zap->zap_object = obj;
377 zap->zap_dbuf = db;
378
379 if (zap_block_type != ZBT_MICRO) {
380 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
381 zap->zap_f.zap_block_shift = highbit64(db->db_size) - 1;
382 if (zap_block_type != ZBT_HEADER || zap_magic != ZAP_MAGIC) {
383 winner = NULL; /* No actual winner here... */
384 goto handle_winner;
385 }
386 } else {
387 zap->zap_ismicro = TRUE;
388 }
389
390 /*
391 * Make sure that zap_ismicro is set before we let others see
392 * it, because zap_lockdir() checks zap_ismicro without the lock
393 * held.
394 */
395 dmu_buf_init_user(&zap->zap_dbu, zap_evict, &zap->zap_dbuf);
396 winner = dmu_buf_set_user(db, &zap->zap_dbu);
397
398 if (winner != NULL)
399 goto handle_winner;
400
401 if (zap->zap_ismicro) {
402 zap->zap_salt = zap_m_phys(zap)->mz_salt;
403 zap->zap_normflags = zap_m_phys(zap)->mz_normflags;
404 zap->zap_m.zap_num_chunks = db->db_size / MZAP_ENT_LEN - 1;
405 avl_create(&zap->zap_m.zap_avl, mze_compare,
406 sizeof (mzap_ent_t), offsetof(mzap_ent_t, mze_node));
407
408 for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
409 mzap_ent_phys_t *mze =
410 &zap_m_phys(zap)->mz_chunk[i];
411 if (mze->mze_name[0]) {
412 zap_name_t *zn;
413
414 zap->zap_m.zap_num_entries++;
415 zn = zap_name_alloc(zap, mze->mze_name,
416 MT_EXACT);
417 mze_insert(zap, i, zn->zn_hash);
418 zap_name_free(zn);
419 }
420 }
421 } else {
422 zap->zap_salt = zap_f_phys(zap)->zap_salt;
423 zap->zap_normflags = zap_f_phys(zap)->zap_normflags;
424
425 ASSERT3U(sizeof (struct zap_leaf_header), ==,
426 2*ZAP_LEAF_CHUNKSIZE);
427
428 /*
429 * The embedded pointer table should not overlap the
430 * other members.
431 */
432 ASSERT3P(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0), >,
433 &zap_f_phys(zap)->zap_salt);
434
435 /*
436 * The embedded pointer table should end at the end of
437 * the block
438 */
439 ASSERT3U((uintptr_t)&ZAP_EMBEDDED_PTRTBL_ENT(zap,
440 1<<ZAP_EMBEDDED_PTRTBL_SHIFT(zap)) -
441 (uintptr_t)zap_f_phys(zap), ==,
442 zap->zap_dbuf->db_size);
443 }
444 rw_exit(&zap->zap_rwlock);
445 return (zap);
446
447 handle_winner:
448 rw_exit(&zap->zap_rwlock);
449 rw_destroy(&zap->zap_rwlock);
450 if (!zap->zap_ismicro)
451 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
452 kmem_free(zap, sizeof (zap_t));
453 return (winner);
454 }
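/*
 * Note on the "winner" protocol above: dmu_buf_set_user() behaves like a
 * compare-and-swap on the dbuf's user pointer.  If another thread
 * attached its zap_t first, that existing zap_t is returned and the one
 * we just built is torn down in handle_winner.  A NULL winner reached
 * through the same path signals a corrupt block type or magic, which
 * zap_lockdir_impl() turns into EIO.
 */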
455
456 static int
457 zap_lockdir_impl(dmu_buf_t *db, void *tag, dmu_tx_t *tx,
458 krw_t lti, boolean_t fatreader, boolean_t adding, zap_t **zapp)
459 {
460 dmu_object_info_t doi;
461 zap_t *zap;
462 krw_t lt;
463
464 objset_t *os = dmu_buf_get_objset(db);
465 uint64_t obj = db->db_object;
466
467 ASSERT0(db->db_offset);
468 *zapp = NULL;
469
470 dmu_object_info_from_db(db, &doi);
471 if (DMU_OT_BYTESWAP(doi.doi_type) != DMU_BSWAP_ZAP)
472 return (SET_ERROR(EINVAL));
473
474 zap = dmu_buf_get_user(db);
475 if (zap == NULL) {
476 zap = mzap_open(os, obj, db);
477 if (zap == NULL) {
478 /*
479 * mzap_open() didn't like what it saw on-disk.
480 * Check for corruption!
481 */
482 return (SET_ERROR(EIO));
483 }
484 }
485
486 /*
487 * We're checking zap_ismicro without the lock held, in order to
488 * tell what type of lock we want. Once we have some sort of
489 * lock, see if it really is the right type. In practice this
490 * can only be different if it was upgraded from micro to fat,
491 * and micro wanted WRITER but fat only needs READER.
492 */
493 lt = (!zap->zap_ismicro && fatreader) ? RW_READER : lti;
494 rw_enter(&zap->zap_rwlock, lt);
495 if (lt != ((!zap->zap_ismicro && fatreader) ? RW_READER : lti)) {
496 /* it was upgraded, now we only need reader */
497 ASSERT(lt == RW_WRITER);
498 ASSERT(RW_READER ==
499 ((!zap->zap_ismicro && fatreader) ? RW_READER : lti));
500 rw_downgrade(&zap->zap_rwlock);
501 lt = RW_READER;
502 }
503
504 zap->zap_objset = os;
505
506 if (lt == RW_WRITER)
507 dmu_buf_will_dirty(db, tx);
508
509 ASSERT3P(zap->zap_dbuf, ==, db);
510
511 ASSERT(!zap->zap_ismicro ||
512 zap->zap_m.zap_num_entries <= zap->zap_m.zap_num_chunks);
513 if (zap->zap_ismicro && tx && adding &&
514 zap->zap_m.zap_num_entries == zap->zap_m.zap_num_chunks) {
515 uint64_t newsz = db->db_size + SPA_MINBLOCKSIZE;
516 if (newsz > MZAP_MAX_BLKSZ) {
517 int err;
518 dprintf("upgrading obj %llu: num_entries=%u\n",
519 obj, zap->zap_m.zap_num_entries);
520 *zapp = zap;
521 err = mzap_upgrade(zapp, tag, tx, 0);
522 if (err != 0)
523 rw_exit(&zap->zap_rwlock);
524 return (err);
525 }
526 VERIFY0(dmu_object_set_blocksize(os, obj, newsz, 0, tx));
527 zap->zap_m.zap_num_chunks =
528 db->db_size / MZAP_ENT_LEN - 1;
529 }
530
531 *zapp = zap;
532 return (0);
533 }
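/*
 * Note on the growth policy above: when a microzap is already full
 * (zap_num_entries == zap_num_chunks) and the caller intends to add, the
 * block is grown by SPA_MINBLOCKSIZE; once the next step would exceed
 * MZAP_MAX_BLKSZ, the object is instead upgraded in place to a fat zap
 * via mzap_upgrade().
 */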
534
535 static int
536 zap_lockdir_by_dnode(dnode_t *dn, dmu_tx_t *tx,
537 krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
538 {
539 dmu_buf_t *db;
540 int err;
541
542 err = dmu_buf_hold_by_dnode(dn, 0, tag, &db, DMU_READ_NO_PREFETCH);
543 if (err != 0) {
544 return (err);
545 }
546 err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
547 if (err != 0) {
548 dmu_buf_rele(db, tag);
549 }
550 return (err);
551 }
552
553 int
554 zap_lockdir(objset_t *os, uint64_t obj, dmu_tx_t *tx,
555 krw_t lti, boolean_t fatreader, boolean_t adding, void *tag, zap_t **zapp)
556 {
557 dmu_buf_t *db;
558 int err;
559
560 err = dmu_buf_hold(os, obj, 0, tag, &db, DMU_READ_NO_PREFETCH);
561 if (err != 0)
562 return (err);
563 err = zap_lockdir_impl(db, tag, tx, lti, fatreader, adding, zapp);
564 if (err != 0)
565 dmu_buf_rele(db, tag);
566 return (err);
567 }
568
569 void
570 zap_unlockdir(zap_t *zap, void *tag)
571 {
572 rw_exit(&zap->zap_rwlock);
573 dmu_buf_rele(zap->zap_dbuf, tag);
574 }
575
576 static int
577 mzap_upgrade(zap_t **zapp, void *tag, dmu_tx_t *tx, zap_flags_t flags)
578 {
579 mzap_phys_t *mzp;
580 int i, sz, nchunks;
581 int err = 0;
582 zap_t *zap = *zapp;
583
584 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
585
586 sz = zap->zap_dbuf->db_size;
587 mzp = vmem_alloc(sz, KM_SLEEP);
588 bcopy(zap->zap_dbuf->db_data, mzp, sz);
589 nchunks = zap->zap_m.zap_num_chunks;
590
591 if (!flags) {
592 err = dmu_object_set_blocksize(zap->zap_objset, zap->zap_object,
593 1ULL << fzap_default_block_shift, 0, tx);
594 if (err) {
595 vmem_free(mzp, sz);
596 return (err);
597 }
598 }
599
600 dprintf("upgrading obj=%llu with %u chunks\n",
601 zap->zap_object, nchunks);
602 /* XXX destroy the avl later, so we can use the stored hash value */
603 mze_destroy(zap);
604
605 fzap_upgrade(zap, tx, flags);
606
607 for (i = 0; i < nchunks; i++) {
608 mzap_ent_phys_t *mze = &mzp->mz_chunk[i];
609 zap_name_t *zn;
610 if (mze->mze_name[0] == 0)
611 continue;
612 dprintf("adding %s=%llu\n",
613 mze->mze_name, mze->mze_value);
614 zn = zap_name_alloc(zap, mze->mze_name, MT_EXACT);
615 err = fzap_add_cd(zn, 8, 1, &mze->mze_value, mze->mze_cd,
616 tag, tx);
617 zap = zn->zn_zap; /* fzap_add_cd() may change zap */
618 zap_name_free(zn);
619 if (err)
620 break;
621 }
622 vmem_free(mzp, sz);
623 *zapp = zap;
624 return (err);
625 }
626
627 void
628 mzap_create_impl(objset_t *os, uint64_t obj, int normflags, zap_flags_t flags,
629 dmu_tx_t *tx)
630 {
631 dmu_buf_t *db;
632 mzap_phys_t *zp;
633
634 VERIFY(0 == dmu_buf_hold(os, obj, 0, FTAG, &db, DMU_READ_NO_PREFETCH));
635
636 #ifdef ZFS_DEBUG
637 {
638 dmu_object_info_t doi;
639 dmu_object_info_from_db(db, &doi);
640 ASSERT3U(DMU_OT_BYTESWAP(doi.doi_type), ==, DMU_BSWAP_ZAP);
641 }
642 #endif
643
644 dmu_buf_will_dirty(db, tx);
645 zp = db->db_data;
646 zp->mz_block_type = ZBT_MICRO;
647 zp->mz_salt = ((uintptr_t)db ^ (uintptr_t)tx ^ (obj << 1)) | 1ULL;
648 zp->mz_normflags = normflags;
649 dmu_buf_rele(db, FTAG);
650
651 if (flags != 0) {
652 zap_t *zap;
653 /* Only fat zap supports flags; upgrade immediately. */
654 VERIFY(0 == zap_lockdir(os, obj, tx, RW_WRITER,
655 B_FALSE, B_FALSE, FTAG, &zap));
656 VERIFY3U(0, ==, mzap_upgrade(&zap, FTAG, tx, flags));
657 zap_unlockdir(zap, FTAG);
658 }
659 }
660
661 int
662 zap_create_claim(objset_t *os, uint64_t obj, dmu_object_type_t ot,
663 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
664 {
665 return (zap_create_claim_dnsize(os, obj, ot, bonustype, bonuslen,
666 0, tx));
667 }
668
669 int
670 zap_create_claim_dnsize(objset_t *os, uint64_t obj, dmu_object_type_t ot,
671 dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
672 {
673 return (zap_create_claim_norm_dnsize(os, obj,
674 0, ot, bonustype, bonuslen, dnodesize, tx));
675 }
676
677 int
678 zap_create_claim_norm(objset_t *os, uint64_t obj, int normflags,
679 dmu_object_type_t ot,
680 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
681 {
682 return (zap_create_claim_norm_dnsize(os, obj, normflags, ot, bonustype,
683 bonuslen, 0, tx));
684 }
685
686 int
687 zap_create_claim_norm_dnsize(objset_t *os, uint64_t obj, int normflags,
688 dmu_object_type_t ot, dmu_object_type_t bonustype, int bonuslen,
689 int dnodesize, dmu_tx_t *tx)
690 {
691 int err;
692
693 err = dmu_object_claim_dnsize(os, obj, ot, 0, bonustype, bonuslen,
694 dnodesize, tx);
695 if (err != 0)
696 return (err);
697 mzap_create_impl(os, obj, normflags, 0, tx);
698 return (0);
699 }
700
701 uint64_t
702 zap_create(objset_t *os, dmu_object_type_t ot,
703 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
704 {
705 return (zap_create_norm(os, 0, ot, bonustype, bonuslen, tx));
706 }
707
708 uint64_t
709 zap_create_dnsize(objset_t *os, dmu_object_type_t ot,
710 dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
711 {
712 return (zap_create_norm_dnsize(os, 0, ot, bonustype, bonuslen,
713 dnodesize, tx));
714 }
715
716 uint64_t
717 zap_create_norm(objset_t *os, int normflags, dmu_object_type_t ot,
718 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
719 {
720 return (zap_create_norm_dnsize(os, normflags, ot, bonustype, bonuslen,
721 0, tx));
722 }
723
724 uint64_t
725 zap_create_norm_dnsize(objset_t *os, int normflags, dmu_object_type_t ot,
726 dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
727 {
728 uint64_t obj = dmu_object_alloc_dnsize(os, ot, 0, bonustype, bonuslen,
729 dnodesize, tx);
730
731 mzap_create_impl(os, obj, normflags, 0, tx);
732 return (obj);
733 }
734
735 uint64_t
736 zap_create_flags(objset_t *os, int normflags, zap_flags_t flags,
737 dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
738 dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *tx)
739 {
740 return (zap_create_flags_dnsize(os, normflags, flags, ot,
741 leaf_blockshift, indirect_blockshift, bonustype, bonuslen, 0, tx));
742 }
743
744 uint64_t
745 zap_create_flags_dnsize(objset_t *os, int normflags, zap_flags_t flags,
746 dmu_object_type_t ot, int leaf_blockshift, int indirect_blockshift,
747 dmu_object_type_t bonustype, int bonuslen, int dnodesize, dmu_tx_t *tx)
748 {
749 uint64_t obj = dmu_object_alloc_dnsize(os, ot, 0, bonustype, bonuslen,
750 dnodesize, tx);
751
752 ASSERT(leaf_blockshift >= SPA_MINBLOCKSHIFT &&
753 leaf_blockshift <= SPA_OLD_MAXBLOCKSHIFT &&
754 indirect_blockshift >= SPA_MINBLOCKSHIFT &&
755 indirect_blockshift <= SPA_OLD_MAXBLOCKSHIFT);
756
757 VERIFY(dmu_object_set_blocksize(os, obj,
758 1ULL << leaf_blockshift, indirect_blockshift, tx) == 0);
759
760 mzap_create_impl(os, obj, normflags, flags, tx);
761 return (obj);
762 }
763
764 int
765 zap_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx)
766 {
767 /*
768 * dmu_object_free will free the object number and free the
769 * data. Freeing the data will cause our pageout function to be
770 * called, which will destroy our data (zap_leaf_t's and zap_t).
771 */
772
773 return (dmu_object_free(os, zapobj, tx));
774 }
775
776 void
777 zap_evict(void *dbu)
778 {
779 zap_t *zap = dbu;
780
781 rw_destroy(&zap->zap_rwlock);
782
783 if (zap->zap_ismicro)
784 mze_destroy(zap);
785 else
786 mutex_destroy(&zap->zap_f.zap_num_entries_mtx);
787
788 kmem_free(zap, sizeof (zap_t));
789 }
790
791 int
792 zap_count(objset_t *os, uint64_t zapobj, uint64_t *count)
793 {
794 zap_t *zap;
795 int err;
796
797 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
798 if (err)
799 return (err);
800 if (!zap->zap_ismicro) {
801 err = fzap_count(zap, count);
802 } else {
803 *count = zap->zap_m.zap_num_entries;
804 }
805 zap_unlockdir(zap, FTAG);
806 return (err);
807 }
808
809 /*
810 * zn may be NULL; if not specified, it will be computed if needed.
811 * See also the comment above zap_entry_normalization_conflict().
812 */
813 static boolean_t
814 mzap_normalization_conflict(zap_t *zap, zap_name_t *zn, mzap_ent_t *mze)
815 {
816 mzap_ent_t *other;
817 int direction = AVL_BEFORE;
818 boolean_t allocdzn = B_FALSE;
819
820 if (zap->zap_normflags == 0)
821 return (B_FALSE);
822
823 again:
824 for (other = avl_walk(&zap->zap_m.zap_avl, mze, direction);
825 other && other->mze_hash == mze->mze_hash;
826 other = avl_walk(&zap->zap_m.zap_avl, other, direction)) {
827
828 if (zn == NULL) {
829 zn = zap_name_alloc(zap, MZE_PHYS(zap, mze)->mze_name,
830 MT_FIRST);
831 allocdzn = B_TRUE;
832 }
833 if (zap_match(zn, MZE_PHYS(zap, other)->mze_name)) {
834 if (allocdzn)
835 zap_name_free(zn);
836 return (B_TRUE);
837 }
838 }
839
840 if (direction == AVL_BEFORE) {
841 direction = AVL_AFTER;
842 goto again;
843 }
844
845 if (allocdzn)
846 zap_name_free(zn);
847 return (B_FALSE);
848 }
849
850 /*
851 * Routines for manipulating attributes.
852 */
853
854 int
855 zap_lookup(objset_t *os, uint64_t zapobj, const char *name,
856 uint64_t integer_size, uint64_t num_integers, void *buf)
857 {
858 return (zap_lookup_norm(os, zapobj, name, integer_size,
859 num_integers, buf, MT_EXACT, NULL, 0, NULL));
860 }
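/*
 * Illustrative sketch of a typical lookup (the object set, object number
 * and "myattr" name below are hypothetical, not part of this file):
 *
 *	uint64_t value;
 *	error = zap_lookup(os, zapobj, "myattr", sizeof (uint64_t), 1,
 *	    &value);
 *
 * integer_size is the width of each integer making up the value and
 * num_integers is how many of them the caller's buffer can hold; a
 * single 8-byte integer is the common case and the only shape a microzap
 * can store directly.
 */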
861
862 static int
863 zap_lookup_impl(zap_t *zap, const char *name,
864 uint64_t integer_size, uint64_t num_integers, void *buf,
865 matchtype_t mt, char *realname, int rn_len,
866 boolean_t *ncp)
867 {
868 int err = 0;
869 mzap_ent_t *mze;
870 zap_name_t *zn;
871
872 zn = zap_name_alloc(zap, name, mt);
873 if (zn == NULL)
874 return (SET_ERROR(ENOTSUP));
875
876 if (!zap->zap_ismicro) {
877 err = fzap_lookup(zn, integer_size, num_integers, buf,
878 realname, rn_len, ncp);
879 } else {
880 mze = mze_find(zn);
881 if (mze == NULL) {
882 err = SET_ERROR(ENOENT);
883 } else {
884 if (num_integers < 1) {
885 err = SET_ERROR(EOVERFLOW);
886 } else if (integer_size != 8) {
887 err = SET_ERROR(EINVAL);
888 } else {
889 *(uint64_t *)buf =
890 MZE_PHYS(zap, mze)->mze_value;
891 (void) strlcpy(realname,
892 MZE_PHYS(zap, mze)->mze_name, rn_len);
893 if (ncp) {
894 *ncp = mzap_normalization_conflict(zap,
895 zn, mze);
896 }
897 }
898 }
899 }
900 zap_name_free(zn);
901 return (err);
902 }
903
904 int
905 zap_lookup_norm(objset_t *os, uint64_t zapobj, const char *name,
906 uint64_t integer_size, uint64_t num_integers, void *buf,
907 matchtype_t mt, char *realname, int rn_len,
908 boolean_t *ncp)
909 {
910 zap_t *zap;
911 int err;
912
913 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
914 if (err != 0)
915 return (err);
916 err = zap_lookup_impl(zap, name, integer_size,
917 num_integers, buf, mt, realname, rn_len, ncp);
918 zap_unlockdir(zap, FTAG);
919 return (err);
920 }
921
922 int
923 zap_prefetch(objset_t *os, uint64_t zapobj, const char *name)
924 {
925 zap_t *zap;
926 int err;
927 zap_name_t *zn;
928
929 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
930 if (err)
931 return (err);
932 zn = zap_name_alloc(zap, name, MT_EXACT);
933 if (zn == NULL) {
934 zap_unlockdir(zap, FTAG);
935 return (SET_ERROR(ENOTSUP));
936 }
937
938 fzap_prefetch(zn);
939 zap_name_free(zn);
940 zap_unlockdir(zap, FTAG);
941 return (err);
942 }
943
944 int
945 zap_lookup_by_dnode(dnode_t *dn, const char *name,
946 uint64_t integer_size, uint64_t num_integers, void *buf)
947 {
948 return (zap_lookup_norm_by_dnode(dn, name, integer_size,
949 num_integers, buf, MT_EXACT, NULL, 0, NULL));
950 }
951
952 int
953 zap_lookup_norm_by_dnode(dnode_t *dn, const char *name,
954 uint64_t integer_size, uint64_t num_integers, void *buf,
955 matchtype_t mt, char *realname, int rn_len,
956 boolean_t *ncp)
957 {
958 zap_t *zap;
959 int err;
960
961 err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
962 FTAG, &zap);
963 if (err != 0)
964 return (err);
965 err = zap_lookup_impl(zap, name, integer_size,
966 num_integers, buf, mt, realname, rn_len, ncp);
967 zap_unlockdir(zap, FTAG);
968 return (err);
969 }
970
971 int
972 zap_prefetch_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
973 int key_numints)
974 {
975 zap_t *zap;
976 int err;
977 zap_name_t *zn;
978
979 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
980 if (err)
981 return (err);
982 zn = zap_name_alloc_uint64(zap, key, key_numints);
983 if (zn == NULL) {
984 zap_unlockdir(zap, FTAG);
985 return (SET_ERROR(ENOTSUP));
986 }
987
988 fzap_prefetch(zn);
989 zap_name_free(zn);
990 zap_unlockdir(zap, FTAG);
991 return (err);
992 }
993
994 int
995 zap_lookup_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
996 int key_numints, uint64_t integer_size, uint64_t num_integers, void *buf)
997 {
998 zap_t *zap;
999 int err;
1000 zap_name_t *zn;
1001
1002 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1003 if (err)
1004 return (err);
1005 zn = zap_name_alloc_uint64(zap, key, key_numints);
1006 if (zn == NULL) {
1007 zap_unlockdir(zap, FTAG);
1008 return (SET_ERROR(ENOTSUP));
1009 }
1010
1011 err = fzap_lookup(zn, integer_size, num_integers, buf,
1012 NULL, 0, NULL);
1013 zap_name_free(zn);
1014 zap_unlockdir(zap, FTAG);
1015 return (err);
1016 }
1017
1018 int
1019 zap_contains(objset_t *os, uint64_t zapobj, const char *name)
1020 {
1021 int err = zap_lookup_norm(os, zapobj, name, 0,
1022 0, NULL, MT_EXACT, NULL, 0, NULL);
1023 if (err == EOVERFLOW || err == EINVAL)
1024 err = 0; /* found, but skipped reading the value */
1025 return (err);
1026 }
1027
1028 int
1029 zap_length(objset_t *os, uint64_t zapobj, const char *name,
1030 uint64_t *integer_size, uint64_t *num_integers)
1031 {
1032 zap_t *zap;
1033 int err;
1034 mzap_ent_t *mze;
1035 zap_name_t *zn;
1036
1037 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1038 if (err)
1039 return (err);
1040 zn = zap_name_alloc(zap, name, MT_EXACT);
1041 if (zn == NULL) {
1042 zap_unlockdir(zap, FTAG);
1043 return (SET_ERROR(ENOTSUP));
1044 }
1045 if (!zap->zap_ismicro) {
1046 err = fzap_length(zn, integer_size, num_integers);
1047 } else {
1048 mze = mze_find(zn);
1049 if (mze == NULL) {
1050 err = SET_ERROR(ENOENT);
1051 } else {
1052 if (integer_size)
1053 *integer_size = 8;
1054 if (num_integers)
1055 *num_integers = 1;
1056 }
1057 }
1058 zap_name_free(zn);
1059 zap_unlockdir(zap, FTAG);
1060 return (err);
1061 }
1062
1063 int
1064 zap_length_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1065 int key_numints, uint64_t *integer_size, uint64_t *num_integers)
1066 {
1067 zap_t *zap;
1068 int err;
1069 zap_name_t *zn;
1070
1071 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1072 if (err)
1073 return (err);
1074 zn = zap_name_alloc_uint64(zap, key, key_numints);
1075 if (zn == NULL) {
1076 zap_unlockdir(zap, FTAG);
1077 return (SET_ERROR(ENOTSUP));
1078 }
1079 err = fzap_length(zn, integer_size, num_integers);
1080 zap_name_free(zn);
1081 zap_unlockdir(zap, FTAG);
1082 return (err);
1083 }
1084
1085 static void
1086 mzap_addent(zap_name_t *zn, uint64_t value)
1087 {
1088 int i;
1089 zap_t *zap = zn->zn_zap;
1090 int start = zap->zap_m.zap_alloc_next;
1091 uint32_t cd;
1092
1093 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
1094
1095 #ifdef ZFS_DEBUG
1096 for (i = 0; i < zap->zap_m.zap_num_chunks; i++) {
1097 ASSERTV(mzap_ent_phys_t *mze);
1098 ASSERT(mze = &zap_m_phys(zap)->mz_chunk[i]);
1099 ASSERT(strcmp(zn->zn_key_orig, mze->mze_name) != 0);
1100 }
1101 #endif
1102
1103 cd = mze_find_unused_cd(zap, zn->zn_hash);
1104 /* given the limited size of the microzap, this can't happen */
1105 ASSERT(cd < zap_maxcd(zap));
1106
1107 again:
1108 for (i = start; i < zap->zap_m.zap_num_chunks; i++) {
1109 mzap_ent_phys_t *mze = &zap_m_phys(zap)->mz_chunk[i];
1110 if (mze->mze_name[0] == 0) {
1111 mze->mze_value = value;
1112 mze->mze_cd = cd;
1113 (void) strlcpy(mze->mze_name, zn->zn_key_orig,
1114 sizeof (mze->mze_name));
1115 zap->zap_m.zap_num_entries++;
1116 zap->zap_m.zap_alloc_next = i+1;
1117 if (zap->zap_m.zap_alloc_next ==
1118 zap->zap_m.zap_num_chunks)
1119 zap->zap_m.zap_alloc_next = 0;
1120 mze_insert(zap, i, zn->zn_hash);
1121 return;
1122 }
1123 }
1124 if (start != 0) {
1125 start = 0;
1126 goto again;
1127 }
1128 cmn_err(CE_PANIC, "out of entries!");
1129 }
1130
1131 static int
1132 zap_add_impl(zap_t *zap, const char *key,
1133 int integer_size, uint64_t num_integers,
1134 const void *val, dmu_tx_t *tx, void *tag)
1135 {
1136 int err = 0;
1137 mzap_ent_t *mze;
1138 const uint64_t *intval = val;
1139 zap_name_t *zn;
1140
1141 zn = zap_name_alloc(zap, key, MT_EXACT);
1142 if (zn == NULL) {
1143 zap_unlockdir(zap, tag);
1144 return (SET_ERROR(ENOTSUP));
1145 }
1146 if (!zap->zap_ismicro) {
1147 err = fzap_add(zn, integer_size, num_integers, val, tag, tx);
1148 zap = zn->zn_zap; /* fzap_add() may change zap */
1149 } else if (integer_size != 8 || num_integers != 1 ||
1150 strlen(key) >= MZAP_NAME_LEN) {
1151 err = mzap_upgrade(&zn->zn_zap, tag, tx, 0);
1152 if (err == 0) {
1153 err = fzap_add(zn, integer_size, num_integers, val,
1154 tag, tx);
1155 }
1156 zap = zn->zn_zap; /* fzap_add() may change zap */
1157 } else {
1158 mze = mze_find(zn);
1159 if (mze != NULL) {
1160 err = SET_ERROR(EEXIST);
1161 } else {
1162 mzap_addent(zn, *intval);
1163 }
1164 }
1165 ASSERT(zap == zn->zn_zap);
1166 zap_name_free(zn);
1167 zap_unlockdir(zap, tag);
1168 return (err);
1169 }
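/*
 * Note: the middle branch above is where a microzap gets promoted.  Any
 * add whose value is not exactly one 8-byte integer, or whose name is
 * MZAP_NAME_LEN characters or longer, forces mzap_upgrade() and then
 * inserts the entry through the fat-zap path.
 */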
1170
1171 int
1172 zap_add(objset_t *os, uint64_t zapobj, const char *key,
1173 int integer_size, uint64_t num_integers,
1174 const void *val, dmu_tx_t *tx)
1175 {
1176 zap_t *zap;
1177 int err;
1178
1179 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1180 if (err != 0)
1181 return (err);
1182 err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
1183 /* zap_add_impl() calls zap_unlockdir() */
1184 return (err);
1185 }
1186
1187 int
1188 zap_add_by_dnode(dnode_t *dn, const char *key,
1189 int integer_size, uint64_t num_integers,
1190 const void *val, dmu_tx_t *tx)
1191 {
1192 zap_t *zap;
1193 int err;
1194
1195 err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1196 if (err != 0)
1197 return (err);
1198 err = zap_add_impl(zap, key, integer_size, num_integers, val, tx, FTAG);
1199 /* zap_add_impl() calls zap_unlockdir() */
1200 return (err);
1201 }
1202
1203 int
1204 zap_add_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1205 int key_numints, int integer_size, uint64_t num_integers,
1206 const void *val, dmu_tx_t *tx)
1207 {
1208 zap_t *zap;
1209 int err;
1210 zap_name_t *zn;
1211
1212 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1213 if (err)
1214 return (err);
1215 zn = zap_name_alloc_uint64(zap, key, key_numints);
1216 if (zn == NULL) {
1217 zap_unlockdir(zap, FTAG);
1218 return (SET_ERROR(ENOTSUP));
1219 }
1220 err = fzap_add(zn, integer_size, num_integers, val, FTAG, tx);
1221 zap = zn->zn_zap; /* fzap_add() may change zap */
1222 zap_name_free(zn);
1223 if (zap != NULL) /* may be NULL if fzap_add() failed */
1224 zap_unlockdir(zap, FTAG);
1225 return (err);
1226 }
1227
1228 int
1229 zap_update(objset_t *os, uint64_t zapobj, const char *name,
1230 int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
1231 {
1232 zap_t *zap;
1233 mzap_ent_t *mze;
1234 const uint64_t *intval = val;
1235 zap_name_t *zn;
1236 int err;
1237
1238 #ifdef ZFS_DEBUG
1239 uint64_t oldval;
1240
1241 /*
1242 * If there is an old value, it shouldn't change across the
1243	 * lockdir (e.g., due to bp rewrite's translation).
1244 */
1245 if (integer_size == 8 && num_integers == 1)
1246 (void) zap_lookup(os, zapobj, name, 8, 1, &oldval);
1247 #endif
1248
1249 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1250 if (err)
1251 return (err);
1252 zn = zap_name_alloc(zap, name, MT_EXACT);
1253 if (zn == NULL) {
1254 zap_unlockdir(zap, FTAG);
1255 return (SET_ERROR(ENOTSUP));
1256 }
1257 if (!zap->zap_ismicro) {
1258 err = fzap_update(zn, integer_size, num_integers, val,
1259 FTAG, tx);
1260 zap = zn->zn_zap; /* fzap_update() may change zap */
1261 } else if (integer_size != 8 || num_integers != 1 ||
1262 strlen(name) >= MZAP_NAME_LEN) {
1263 dprintf("upgrading obj %llu: intsz=%u numint=%llu name=%s\n",
1264 zapobj, integer_size, num_integers, name);
1265 err = mzap_upgrade(&zn->zn_zap, FTAG, tx, 0);
1266 if (err == 0) {
1267 err = fzap_update(zn, integer_size, num_integers,
1268 val, FTAG, tx);
1269 }
1270 zap = zn->zn_zap; /* fzap_update() may change zap */
1271 } else {
1272 mze = mze_find(zn);
1273 if (mze != NULL) {
1274 ASSERT3U(MZE_PHYS(zap, mze)->mze_value, ==, oldval);
1275 MZE_PHYS(zap, mze)->mze_value = *intval;
1276 } else {
1277 mzap_addent(zn, *intval);
1278 }
1279 }
1280 ASSERT(zap == zn->zn_zap);
1281 zap_name_free(zn);
1282 if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
1283 zap_unlockdir(zap, FTAG);
1284 return (err);
1285 }
1286
1287 int
1288 zap_update_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1289 int key_numints,
1290 int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
1291 {
1292 zap_t *zap;
1293 zap_name_t *zn;
1294 int err;
1295
1296 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, TRUE, FTAG, &zap);
1297 if (err)
1298 return (err);
1299 zn = zap_name_alloc_uint64(zap, key, key_numints);
1300 if (zn == NULL) {
1301 zap_unlockdir(zap, FTAG);
1302 return (SET_ERROR(ENOTSUP));
1303 }
1304 err = fzap_update(zn, integer_size, num_integers, val, FTAG, tx);
1305 zap = zn->zn_zap; /* fzap_update() may change zap */
1306 zap_name_free(zn);
1307 if (zap != NULL) /* may be NULL if fzap_upgrade() failed */
1308 zap_unlockdir(zap, FTAG);
1309 return (err);
1310 }
1311
1312 int
1313 zap_remove(objset_t *os, uint64_t zapobj, const char *name, dmu_tx_t *tx)
1314 {
1315 return (zap_remove_norm(os, zapobj, name, MT_EXACT, tx));
1316 }
1317
1318 static int
1319 zap_remove_impl(zap_t *zap, const char *name,
1320 matchtype_t mt, dmu_tx_t *tx)
1321 {
1322 mzap_ent_t *mze;
1323 zap_name_t *zn;
1324 int err = 0;
1325
1326 zn = zap_name_alloc(zap, name, mt);
1327 if (zn == NULL)
1328 return (SET_ERROR(ENOTSUP));
1329 if (!zap->zap_ismicro) {
1330 err = fzap_remove(zn, tx);
1331 } else {
1332 mze = mze_find(zn);
1333 if (mze == NULL) {
1334 err = SET_ERROR(ENOENT);
1335 } else {
1336 zap->zap_m.zap_num_entries--;
1337 bzero(&zap_m_phys(zap)->mz_chunk[mze->mze_chunkid],
1338 sizeof (mzap_ent_phys_t));
1339 mze_remove(zap, mze);
1340 }
1341 }
1342 zap_name_free(zn);
1343 return (err);
1344 }
1345
1346 int
1347 zap_remove_norm(objset_t *os, uint64_t zapobj, const char *name,
1348 matchtype_t mt, dmu_tx_t *tx)
1349 {
1350 zap_t *zap;
1351 int err;
1352
1353 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1354 if (err)
1355 return (err);
1356 err = zap_remove_impl(zap, name, mt, tx);
1357 zap_unlockdir(zap, FTAG);
1358 return (err);
1359 }
1360
1361 int
1362 zap_remove_by_dnode(dnode_t *dn, const char *name, dmu_tx_t *tx)
1363 {
1364 zap_t *zap;
1365 int err;
1366
1367 err = zap_lockdir_by_dnode(dn, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1368 if (err)
1369 return (err);
1370 err = zap_remove_impl(zap, name, MT_EXACT, tx);
1371 zap_unlockdir(zap, FTAG);
1372 return (err);
1373 }
1374
1375 int
1376 zap_remove_uint64(objset_t *os, uint64_t zapobj, const uint64_t *key,
1377 int key_numints, dmu_tx_t *tx)
1378 {
1379 zap_t *zap;
1380 int err;
1381 zap_name_t *zn;
1382
1383 err = zap_lockdir(os, zapobj, tx, RW_WRITER, TRUE, FALSE, FTAG, &zap);
1384 if (err)
1385 return (err);
1386 zn = zap_name_alloc_uint64(zap, key, key_numints);
1387 if (zn == NULL) {
1388 zap_unlockdir(zap, FTAG);
1389 return (SET_ERROR(ENOTSUP));
1390 }
1391 err = fzap_remove(zn, tx);
1392 zap_name_free(zn);
1393 zap_unlockdir(zap, FTAG);
1394 return (err);
1395 }
1396
1397 /*
1398 * Routines for iterating over the attributes.
1399 */
1400
1401 void
1402 zap_cursor_init_serialized(zap_cursor_t *zc, objset_t *os, uint64_t zapobj,
1403 uint64_t serialized)
1404 {
1405 zc->zc_objset = os;
1406 zc->zc_zap = NULL;
1407 zc->zc_leaf = NULL;
1408 zc->zc_zapobj = zapobj;
1409 zc->zc_serialized = serialized;
1410 zc->zc_hash = 0;
1411 zc->zc_cd = 0;
1412 }
1413
1414 void
1415 zap_cursor_init(zap_cursor_t *zc, objset_t *os, uint64_t zapobj)
1416 {
1417 zap_cursor_init_serialized(zc, os, zapobj, 0);
1418 }
1419
1420 void
1421 zap_cursor_fini(zap_cursor_t *zc)
1422 {
1423 if (zc->zc_zap) {
1424 rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
1425 zap_unlockdir(zc->zc_zap, NULL);
1426 zc->zc_zap = NULL;
1427 }
1428 if (zc->zc_leaf) {
1429 rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1430 zap_put_leaf(zc->zc_leaf);
1431 zc->zc_leaf = NULL;
1432 }
1433 zc->zc_objset = NULL;
1434 }
1435
1436 uint64_t
1437 zap_cursor_serialize(zap_cursor_t *zc)
1438 {
1439 if (zc->zc_hash == -1ULL)
1440 return (-1ULL);
1441 if (zc->zc_zap == NULL)
1442 return (zc->zc_serialized);
1443 ASSERT((zc->zc_hash & zap_maxcd(zc->zc_zap)) == 0);
1444 ASSERT(zc->zc_cd < zap_maxcd(zc->zc_zap));
1445
1446 /*
1447 * We want to keep the high 32 bits of the cursor zero if we can, so
1448	 * that 32-bit programs can access it.  We therefore usually use a small
1449	 * (28-bit) hash value, which lets us fit 4 bits of cd into the low
1450	 * 32 bits of the cursor.
1451 *
1452 * [ collision differentiator | zap_hashbits()-bit hash value ]
1453 */
1454 return ((zc->zc_hash >> (64 - zap_hashbits(zc->zc_zap))) |
1455 ((uint64_t)zc->zc_cd << zap_hashbits(zc->zc_zap)));
1456 }
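/*
 * Worked example of the encoding above (assuming the default 28-bit
 * hash): the serialized cookie is (zc_hash >> 36) | (zc_cd << 28), and
 * zap_cursor_retrieve() reverses it with zc_hash = serialized << 36 and
 * zc_cd += serialized >> 28.  A cursor whose cd fits in 4 bits therefore
 * keeps the high 32 bits of the cookie zero, which is what lets 32-bit
 * callers round-trip it.
 */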
1457
1458 int
1459 zap_cursor_retrieve(zap_cursor_t *zc, zap_attribute_t *za)
1460 {
1461 int err;
1462 avl_index_t idx;
1463 mzap_ent_t mze_tofind;
1464 mzap_ent_t *mze;
1465
1466 if (zc->zc_hash == -1ULL)
1467 return (SET_ERROR(ENOENT));
1468
1469 if (zc->zc_zap == NULL) {
1470 int hb;
1471 err = zap_lockdir(zc->zc_objset, zc->zc_zapobj, NULL,
1472 RW_READER, TRUE, FALSE, NULL, &zc->zc_zap);
1473 if (err)
1474 return (err);
1475
1476 /*
1477 * To support zap_cursor_init_serialized, advance, retrieve,
1478 * we must add to the existing zc_cd, which may already
1479 * be 1 due to the zap_cursor_advance.
1480 */
1481 ASSERT(zc->zc_hash == 0);
1482 hb = zap_hashbits(zc->zc_zap);
1483 zc->zc_hash = zc->zc_serialized << (64 - hb);
1484 zc->zc_cd += zc->zc_serialized >> hb;
1485 if (zc->zc_cd >= zap_maxcd(zc->zc_zap)) /* corrupt serialized */
1486 zc->zc_cd = 0;
1487 } else {
1488 rw_enter(&zc->zc_zap->zap_rwlock, RW_READER);
1489 }
1490 if (!zc->zc_zap->zap_ismicro) {
1491 err = fzap_cursor_retrieve(zc->zc_zap, zc, za);
1492 } else {
1493 mze_tofind.mze_hash = zc->zc_hash;
1494 mze_tofind.mze_cd = zc->zc_cd;
1495
1496 mze = avl_find(&zc->zc_zap->zap_m.zap_avl, &mze_tofind, &idx);
1497 if (mze == NULL) {
1498 mze = avl_nearest(&zc->zc_zap->zap_m.zap_avl,
1499 idx, AVL_AFTER);
1500 }
1501 if (mze) {
1502 mzap_ent_phys_t *mzep = MZE_PHYS(zc->zc_zap, mze);
1503 ASSERT3U(mze->mze_cd, ==, mzep->mze_cd);
1504 za->za_normalization_conflict =
1505 mzap_normalization_conflict(zc->zc_zap, NULL, mze);
1506 za->za_integer_length = 8;
1507 za->za_num_integers = 1;
1508 za->za_first_integer = mzep->mze_value;
1509 (void) strcpy(za->za_name, mzep->mze_name);
1510 zc->zc_hash = mze->mze_hash;
1511 zc->zc_cd = mze->mze_cd;
1512 err = 0;
1513 } else {
1514 zc->zc_hash = -1ULL;
1515 err = SET_ERROR(ENOENT);
1516 }
1517 }
1518 rw_exit(&zc->zc_zap->zap_rwlock);
1519 return (err);
1520 }
1521
1522 void
1523 zap_cursor_advance(zap_cursor_t *zc)
1524 {
1525 if (zc->zc_hash == -1ULL)
1526 return;
1527 zc->zc_cd++;
1528 }
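/*
 * Illustrative iteration sketch (zc, za, os and zapobj are hypothetical
 * locals); this is the pattern the cursor API expects:
 *
 *	zap_cursor_t zc;
 *	zap_attribute_t za;
 *
 *	for (zap_cursor_init(&zc, os, zapobj);
 *	    zap_cursor_retrieve(&zc, &za) == 0;
 *	    zap_cursor_advance(&zc))
 *		consume za.za_name, za.za_first_integer, ...;
 *	zap_cursor_fini(&zc);
 *
 * zap_cursor_retrieve() returns ENOENT once the last entry has been
 * returned.
 */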
1529
1530 int
1531 zap_get_stats(objset_t *os, uint64_t zapobj, zap_stats_t *zs)
1532 {
1533 int err;
1534 zap_t *zap;
1535
1536 err = zap_lockdir(os, zapobj, NULL, RW_READER, TRUE, FALSE, FTAG, &zap);
1537 if (err)
1538 return (err);
1539
1540 bzero(zs, sizeof (zap_stats_t));
1541
1542 if (zap->zap_ismicro) {
1543 zs->zs_blocksize = zap->zap_dbuf->db_size;
1544 zs->zs_num_entries = zap->zap_m.zap_num_entries;
1545 zs->zs_num_blocks = 1;
1546 } else {
1547 fzap_get_stats(zap, zs);
1548 }
1549 zap_unlockdir(zap, FTAG);
1550 return (0);
1551 }
1552
1553 int
1554 zap_count_write_by_dnode(dnode_t *dn, const char *name, int add,
1555 uint64_t *towrite, uint64_t *tooverwrite)
1556 {
1557 zap_t *zap;
1558 int err = 0;
1559
1560 /*
1561	 * Since we don't have a name, we cannot figure out which blocks will
1562	 * be affected in this operation, so account for the worst case:
1563 * - 3 blocks overwritten: target leaf, ptrtbl block, header block
1564 * - 4 new blocks written if adding:
1565 * - 2 blocks for possibly split leaves,
1566 * - 2 grown ptrtbl blocks
1567 *
1568 * This also accommodates the case where an add operation to a fairly
1569 * large microzap results in a promotion to fatzap.
1570 */
1571 if (name == NULL) {
1572 *towrite += (3 + (add ? 4 : 0)) * SPA_OLD_MAXBLOCKSIZE;
1573 return (err);
1574 }
1575
1576 /*
1577	 * We lock the zap with adding == FALSE, because passing the actual
1578	 * value of add could trigger a mzap_upgrade().  At present we are just
1579	 * evaluating the possibility of this operation and hence do not want
1580	 * to trigger an upgrade.
1581 */
1582 err = zap_lockdir_by_dnode(dn, NULL, RW_READER, TRUE, FALSE,
1583 FTAG, &zap);
1584 if (err != 0)
1585 return (err);
1586
1587 if (!zap->zap_ismicro) {
1588 zap_name_t *zn = zap_name_alloc(zap, name, MT_EXACT);
1589 if (zn) {
1590 err = fzap_count_write(zn, add, towrite,
1591 tooverwrite);
1592 zap_name_free(zn);
1593 } else {
1594 /*
1595 * We treat this case as similar to (name == NULL)
1596 */
1597 *towrite += (3 + (add ? 4 : 0)) * SPA_OLD_MAXBLOCKSIZE;
1598 }
1599 } else {
1600 /*
1601 * We are here if (name != NULL) and this is a micro-zap.
1602 * We account for the header block depending on whether it
1603 * is freeable.
1604 *
1605	 * In case of an add operation it is hard to find out
1606	 * whether this add will promote this microzap to a fatzap.
1607 * Hence, we consider the worst case and account for the
1608 * blocks assuming this microzap would be promoted to a
1609 * fatzap.
1610 *
1611	 * 1 block overwritten: header block
1612	 * 4 new blocks written: 2 new split leaves, 2 grown
1613	 * ptrtbl blocks
1614 */
1615 if (dmu_buf_freeable(zap->zap_dbuf))
1616 *tooverwrite += MZAP_MAX_BLKSZ;
1617 else
1618 *towrite += MZAP_MAX_BLKSZ;
1619
1620 if (add) {
1621 *towrite += 4 * MZAP_MAX_BLKSZ;
1622 }
1623 }
1624
1625 zap_unlockdir(zap, FTAG);
1626 return (err);
1627 }
1628
1629 #if defined(_KERNEL) && defined(HAVE_SPL)
1630 EXPORT_SYMBOL(zap_create);
1631 EXPORT_SYMBOL(zap_create_dnsize);
1632 EXPORT_SYMBOL(zap_create_norm);
1633 EXPORT_SYMBOL(zap_create_norm_dnsize);
1634 EXPORT_SYMBOL(zap_create_flags);
1635 EXPORT_SYMBOL(zap_create_flags_dnsize);
1636 EXPORT_SYMBOL(zap_create_claim);
1637 EXPORT_SYMBOL(zap_create_claim_norm);
1638 EXPORT_SYMBOL(zap_create_claim_norm_dnsize);
1639 EXPORT_SYMBOL(zap_destroy);
1640 EXPORT_SYMBOL(zap_lookup);
1641 EXPORT_SYMBOL(zap_lookup_by_dnode);
1642 EXPORT_SYMBOL(zap_lookup_norm);
1643 EXPORT_SYMBOL(zap_lookup_uint64);
1644 EXPORT_SYMBOL(zap_contains);
1645 EXPORT_SYMBOL(zap_prefetch);
1646 EXPORT_SYMBOL(zap_prefetch_uint64);
1647 EXPORT_SYMBOL(zap_count_write_by_dnode);
1648 EXPORT_SYMBOL(zap_add);
1649 EXPORT_SYMBOL(zap_add_by_dnode);
1650 EXPORT_SYMBOL(zap_add_uint64);
1651 EXPORT_SYMBOL(zap_update);
1652 EXPORT_SYMBOL(zap_update_uint64);
1653 EXPORT_SYMBOL(zap_length);
1654 EXPORT_SYMBOL(zap_length_uint64);
1655 EXPORT_SYMBOL(zap_remove);
1656 EXPORT_SYMBOL(zap_remove_by_dnode);
1657 EXPORT_SYMBOL(zap_remove_norm);
1658 EXPORT_SYMBOL(zap_remove_uint64);
1659 EXPORT_SYMBOL(zap_count);
1660 EXPORT_SYMBOL(zap_value_search);
1661 EXPORT_SYMBOL(zap_join);
1662 EXPORT_SYMBOL(zap_join_increment);
1663 EXPORT_SYMBOL(zap_add_int);
1664 EXPORT_SYMBOL(zap_remove_int);
1665 EXPORT_SYMBOL(zap_lookup_int);
1666 EXPORT_SYMBOL(zap_increment_int);
1667 EXPORT_SYMBOL(zap_add_int_key);
1668 EXPORT_SYMBOL(zap_lookup_int_key);
1669 EXPORT_SYMBOL(zap_increment);
1670 EXPORT_SYMBOL(zap_cursor_init);
1671 EXPORT_SYMBOL(zap_cursor_fini);
1672 EXPORT_SYMBOL(zap_cursor_retrieve);
1673 EXPORT_SYMBOL(zap_cursor_advance);
1674 EXPORT_SYMBOL(zap_cursor_serialize);
1675 EXPORT_SYMBOL(zap_cursor_init_serialized);
1676 EXPORT_SYMBOL(zap_get_stats);
1677 #endif