/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>

typedef struct dsl_dataset_user_hold_arg {
        nvlist_t *dduha_holds;
        nvlist_t *dduha_chkholds;
        nvlist_t *dduha_errlist;
        minor_t dduha_minor;
} dsl_dataset_user_hold_arg_t;

/*
 * If you add new checks here, you may need to add additional checks to the
 * "temporary" case in snapshot_check() in dmu_objset.c.
 */
int
dsl_dataset_user_hold_check_one(dsl_dataset_t *ds, const char *htag,
    boolean_t temphold, dmu_tx_t *tx)
{
        dsl_pool_t *dp = dmu_tx_pool(tx);
        objset_t *mos = dp->dp_meta_objset;
        int error = 0;

        ASSERT(dsl_pool_config_held(dp));

        if (strlen(htag) > MAXNAMELEN)
                return (SET_ERROR(E2BIG));
        /* Tempholds have a more restricted length */
        if (temphold && strlen(htag) + MAX_TAG_PREFIX_LEN >= MAXNAMELEN)
                return (SET_ERROR(E2BIG));

        /* tags must be unique (if ds already exists) */
        if (ds != NULL && dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
                uint64_t value;

                error = zap_lookup(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
                    htag, 8, 1, &value);
                if (error == 0)
                        error = SET_ERROR(EEXIST);
                else if (error == ENOENT)
                        error = 0;
        }

        return (error);
}

static int
dsl_dataset_user_hold_check(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_user_hold_arg_t *dduha = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        nvlist_t *tmp_holds;

        if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS)
                return (SET_ERROR(ENOTSUP));

        if (!dmu_tx_is_syncing(tx))
                return (0);

        /*
         * Ensure the list has no duplicates by copying name/values from
         * non-unique dduha_holds to unique tmp_holds, and comparing counts.
         */
        tmp_holds = fnvlist_alloc();
        for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
            pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
                size_t len = strlen(nvpair_name(pair)) +
                    strlen(fnvpair_value_string(pair));
                char *nameval = kmem_zalloc(len + 2, KM_SLEEP);
                (void) strcpy(nameval, nvpair_name(pair));
                (void) strcat(nameval, "@");
                (void) strcat(nameval, fnvpair_value_string(pair));
                fnvlist_add_string(tmp_holds, nameval, "");
                kmem_free(nameval, len + 2);
        }
        size_t tmp_count = fnvlist_num_pairs(tmp_holds);
        fnvlist_free(tmp_holds);
        if (tmp_count != fnvlist_num_pairs(dduha->dduha_holds))
                return (SET_ERROR(EEXIST));
        for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_holds, NULL);
            pair != NULL; pair = nvlist_next_nvpair(dduha->dduha_holds, pair)) {
                dsl_dataset_t *ds;
                int error = 0;
                char *htag, *name;

                /* must be a snapshot */
                name = nvpair_name(pair);
                if (strchr(name, '@') == NULL)
                        error = SET_ERROR(EINVAL);

                if (error == 0)
                        error = nvpair_value_string(pair, &htag);

                if (error == 0)
                        error = dsl_dataset_hold(dp, name, FTAG, &ds);

                if (error == 0) {
                        error = dsl_dataset_user_hold_check_one(ds, htag,
                            dduha->dduha_minor != 0, tx);
                        dsl_dataset_rele(ds, FTAG);
                }

                if (error == 0) {
                        fnvlist_add_string(dduha->dduha_chkholds, name, htag);
                } else {
                        /*
                         * We register ENOENT errors so they can be correctly
                         * reported if needed, such as when all holds fail.
                         */
                        fnvlist_add_int32(dduha->dduha_errlist, name, error);
                        if (error != ENOENT)
                                return (error);
                }
        }

        return (0);
}
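
/*
 * Illustrative sketch (not part of the build): why the duplicate check in
 * dsl_dataset_user_hold_check() works. dduha_holds may be a non-unique
 * nvlist, but tmp_holds is allocated with fnvlist_alloc() (NV_UNIQUE_NAME),
 * so two identical "<snapname>@<holdtag>" requests collapse into a single
 * key and the pair counts no longer match. The snapshot and tag names below
 * are hypothetical.
 *
 *      nvlist_t *holds;
 *      VERIFY0(nvlist_alloc(&holds, 0, KM_SLEEP));     // non-unique list
 *      fnvlist_add_string(holds, "tank/fs@snap1", "tag1");
 *      fnvlist_add_string(holds, "tank/fs@snap1", "tag1");     // duplicate
 *
 *      // holds has 2 pairs, but tmp_holds collapses them into the single
 *      // key "tank/fs@snap1@tag1", so the check returns EEXIST.
 */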


static void
dsl_dataset_user_hold_sync_one_impl(nvlist_t *tmpholds, dsl_dataset_t *ds,
    const char *htag, minor_t minor, uint64_t now, dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;
        uint64_t zapobj;

        ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

        if (dsl_dataset_phys(ds)->ds_userrefs_obj == 0) {
                /*
                 * This is the first user hold for this dataset. Create
                 * the userrefs zap object.
                 */
                dmu_buf_will_dirty(ds->ds_dbuf, tx);
                zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj =
                    zap_create(mos, DMU_OT_USERREFS, DMU_OT_NONE, 0, tx);
        } else {
                zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj;
        }
        ds->ds_userrefs++;

        VERIFY0(zap_add(mos, zapobj, htag, 8, 1, &now, tx));

        if (minor != 0) {
                char name[MAXNAMELEN];
                nvlist_t *tags;

                VERIFY0(dsl_pool_user_hold(dp, ds->ds_object,
                    htag, now, tx));
                (void) snprintf(name, sizeof (name), "%llx",
                    (u_longlong_t)ds->ds_object);

                if (nvlist_lookup_nvlist(tmpholds, name, &tags) != 0) {
                        tags = fnvlist_alloc();
                        fnvlist_add_boolean(tags, htag);
                        fnvlist_add_nvlist(tmpholds, name, tags);
                        fnvlist_free(tags);
                } else {
                        fnvlist_add_boolean(tags, htag);
                }
        }

        spa_history_log_internal_ds(ds, "hold", tx,
            "tag=%s temp=%d refs=%llu",
            htag, minor != 0, ds->ds_userrefs);
}

typedef struct zfs_hold_cleanup_arg {
        char zhca_spaname[ZFS_MAX_DATASET_NAME_LEN];
        uint64_t zhca_spa_load_guid;
        nvlist_t *zhca_holds;
} zfs_hold_cleanup_arg_t;

static void
dsl_dataset_user_release_onexit(void *arg)
{
        zfs_hold_cleanup_arg_t *ca = arg;
        spa_t *spa;
        int error;

        error = spa_open(ca->zhca_spaname, &spa, FTAG);
        if (error != 0) {
                zfs_dbgmsg("couldn't release holds on pool=%s "
                    "because pool is no longer loaded",
                    ca->zhca_spaname);
                return;
        }
        if (spa_load_guid(spa) != ca->zhca_spa_load_guid) {
                zfs_dbgmsg("couldn't release holds on pool=%s "
                    "because pool is no longer loaded (guid doesn't match)",
                    ca->zhca_spaname);
                spa_close(spa, FTAG);
                return;
        }

        (void) dsl_dataset_user_release_tmp(spa_get_dsl(spa), ca->zhca_holds);
        fnvlist_free(ca->zhca_holds);
        kmem_free(ca, sizeof (zfs_hold_cleanup_arg_t));
        spa_close(spa, FTAG);
}

static void
dsl_onexit_hold_cleanup(spa_t *spa, nvlist_t *holds, minor_t minor)
{
        zfs_hold_cleanup_arg_t *ca;

        if (minor == 0 || nvlist_empty(holds)) {
                fnvlist_free(holds);
                return;
        }

        ASSERT(spa != NULL);
        ca = kmem_alloc(sizeof (*ca), KM_SLEEP);

        (void) strlcpy(ca->zhca_spaname, spa_name(spa),
            sizeof (ca->zhca_spaname));
        ca->zhca_spa_load_guid = spa_load_guid(spa);
        ca->zhca_holds = holds;
        VERIFY0(zfs_onexit_add_cb(minor,
            dsl_dataset_user_release_onexit, ca, NULL));
}

void
dsl_dataset_user_hold_sync_one(dsl_dataset_t *ds, const char *htag,
    minor_t minor, uint64_t now, dmu_tx_t *tx)
{
        nvlist_t *tmpholds;

        if (minor != 0)
                tmpholds = fnvlist_alloc();
        else
                tmpholds = NULL;
        dsl_dataset_user_hold_sync_one_impl(tmpholds, ds, htag, minor, now, tx);
        dsl_onexit_hold_cleanup(dsl_dataset_get_spa(ds), tmpholds, minor);
}

static void
dsl_dataset_user_hold_sync(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_user_hold_arg_t *dduha = arg;
        dsl_pool_t *dp = dmu_tx_pool(tx);
        nvlist_t *tmpholds;
        uint64_t now = gethrestime_sec();

        if (dduha->dduha_minor != 0)
                tmpholds = fnvlist_alloc();
        else
                tmpholds = NULL;
        for (nvpair_t *pair = nvlist_next_nvpair(dduha->dduha_chkholds, NULL);
            pair != NULL;
            pair = nvlist_next_nvpair(dduha->dduha_chkholds, pair)) {
                dsl_dataset_t *ds;

                VERIFY0(dsl_dataset_hold(dp, nvpair_name(pair), FTAG, &ds));
                dsl_dataset_user_hold_sync_one_impl(tmpholds, ds,
                    fnvpair_value_string(pair), dduha->dduha_minor, now, tx);
                dsl_dataset_rele(ds, FTAG);
        }
        dsl_onexit_hold_cleanup(dp->dp_spa, tmpholds, dduha->dduha_minor);
}

/*
 * The full semantics of this function are described in the comment above
 * lzc_hold().
 *
 * To summarize:
 * holds is nvl of snapname -> holdname
 * errlist will be filled in with snapname -> error
 *
 * The snapshots must all be in the same pool.
 *
 * Holds for snapshots that don't exist will be skipped.
 *
 * If none of the snapshots for the requested holds exist then ENOENT will be
 * returned.
 *
 * If cleanup_minor is not 0, the holds will be temporary and will be cleaned
 * up when the process exits.
 *
 * On success all the holds, for snapshots that existed, will be created and 0
 * will be returned.
 *
 * On failure no holds will be created, the errlist will be filled in,
 * and an errno will be returned.
 *
 * In all cases the errlist will contain entries for holds where the snapshot
 * didn't exist.
 */
int
dsl_dataset_user_hold(nvlist_t *holds, minor_t cleanup_minor, nvlist_t *errlist)
{
        dsl_dataset_user_hold_arg_t dduha;
        nvpair_t *pair;
        int ret;

        pair = nvlist_next_nvpair(holds, NULL);
        if (pair == NULL)
                return (0);

        dduha.dduha_holds = holds;
        /* chkholds can have non-unique names */
        VERIFY(0 == nvlist_alloc(&dduha.dduha_chkholds, 0, KM_SLEEP));
        dduha.dduha_errlist = errlist;
        dduha.dduha_minor = cleanup_minor;

        ret = dsl_sync_task(nvpair_name(pair), dsl_dataset_user_hold_check,
            dsl_dataset_user_hold_sync, &dduha,
            fnvlist_num_pairs(holds), ZFS_SPACE_CHECK_RESERVED);
        fnvlist_free(dduha.dduha_chkholds);

        return (ret);
}
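
/*
 * Illustrative sketch (not part of the build): how a caller might construct
 * the "holds" nvlist consumed by dsl_dataset_user_hold(). In practice the
 * nvlist arrives via lzc_hold(); the snapshot and tag names here are
 * hypothetical.
 *
 *      nvlist_t *holds = fnvlist_alloc();
 *      nvlist_t *errlist = fnvlist_alloc();
 *
 *      // one entry per snapshot: snapshot name -> hold tag
 *      fnvlist_add_string(holds, "tank/fs@snap1", "backup-job");
 *      fnvlist_add_string(holds, "tank/fs@snap2", "backup-job");
 *
 *      // cleanup_minor == 0 requests permanent (non-temporary) holds
 *      int error = dsl_dataset_user_hold(holds, 0, errlist);
 *
 *      fnvlist_free(holds);
 *      fnvlist_free(errlist);
 */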

typedef int (dsl_holdfunc_t)(dsl_pool_t *dp, const char *name, void *tag,
    dsl_dataset_t **dsp);

typedef struct dsl_dataset_user_release_arg {
        dsl_holdfunc_t *ddura_holdfunc;
        nvlist_t *ddura_holds;
        nvlist_t *ddura_todelete;
        nvlist_t *ddura_errlist;
        nvlist_t *ddura_chkholds;
} dsl_dataset_user_release_arg_t;

/* Place a dataset hold on the snapshot identified by the passed dsobj string */
static int
dsl_dataset_hold_obj_string(dsl_pool_t *dp, const char *dsobj, void *tag,
    dsl_dataset_t **dsp)
{
        return (dsl_dataset_hold_obj(dp, zfs_strtonum(dsobj, NULL), tag, dsp));
}

static int
dsl_dataset_user_release_check_one(dsl_dataset_user_release_arg_t *ddura,
    dsl_dataset_t *ds, nvlist_t *holds, const char *snapname)
{
        uint64_t zapobj;
        nvlist_t *holds_found;
        objset_t *mos;
        int numholds;

        if (!ds->ds_is_snapshot)
                return (SET_ERROR(EINVAL));

        if (nvlist_empty(holds))
                return (0);

        numholds = 0;
        mos = ds->ds_dir->dd_pool->dp_meta_objset;
        zapobj = dsl_dataset_phys(ds)->ds_userrefs_obj;
        VERIFY0(nvlist_alloc(&holds_found, NV_UNIQUE_NAME, KM_SLEEP));

        for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
            pair = nvlist_next_nvpair(holds, pair)) {
                uint64_t tmp;
                int error;
                const char *holdname = nvpair_name(pair);

                if (zapobj != 0)
                        error = zap_lookup(mos, zapobj, holdname, 8, 1, &tmp);
                else
                        error = SET_ERROR(ENOENT);

                /*
                 * Non-existent holds are put on the errlist, but don't
                 * cause an overall failure.
                 */
                if (error == ENOENT) {
                        if (ddura->ddura_errlist != NULL) {
                                char *errtag = kmem_asprintf("%s#%s",
                                    snapname, holdname);
                                fnvlist_add_int32(ddura->ddura_errlist, errtag,
                                    ENOENT);
                                strfree(errtag);
                        }
                        continue;
                }

                if (error != 0) {
                        fnvlist_free(holds_found);
                        return (error);
                }

                fnvlist_add_boolean(holds_found, holdname);
                numholds++;
        }

        if (DS_IS_DEFER_DESTROY(ds) &&
            dsl_dataset_phys(ds)->ds_num_children == 1 &&
            ds->ds_userrefs == numholds) {
                /* we need to destroy the snapshot as well */
                if (dsl_dataset_long_held(ds)) {
                        fnvlist_free(holds_found);
                        return (SET_ERROR(EBUSY));
                }
                fnvlist_add_boolean(ddura->ddura_todelete, snapname);
        }

        if (numholds != 0) {
                fnvlist_add_nvlist(ddura->ddura_chkholds, snapname,
                    holds_found);
        }
        fnvlist_free(holds_found);

        return (0);
}

static int
dsl_dataset_user_release_check(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_user_release_arg_t *ddura;
        dsl_holdfunc_t *holdfunc;
        dsl_pool_t *dp;

        if (!dmu_tx_is_syncing(tx))
                return (0);

        dp = dmu_tx_pool(tx);

        ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

        ddura = arg;
        holdfunc = ddura->ddura_holdfunc;

        for (nvpair_t *pair = nvlist_next_nvpair(ddura->ddura_holds, NULL);
            pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_holds, pair)) {
                int error;
                dsl_dataset_t *ds;
                nvlist_t *holds;
                const char *snapname = nvpair_name(pair);

                error = nvpair_value_nvlist(pair, &holds);
                if (error != 0)
                        error = (SET_ERROR(EINVAL));
                else
                        error = holdfunc(dp, snapname, FTAG, &ds);
                if (error == 0) {
                        error = dsl_dataset_user_release_check_one(ddura, ds,
                            holds, snapname);
                        dsl_dataset_rele(ds, FTAG);
                }
                if (error != 0) {
                        if (ddura->ddura_errlist != NULL) {
                                fnvlist_add_int32(ddura->ddura_errlist,
                                    snapname, error);
                        }
                        /*
                         * Non-existent snapshots are put on the errlist,
                         * but don't cause an overall failure.
                         */
                        if (error != ENOENT)
                                return (error);
                }
        }

        return (0);
}

static void
dsl_dataset_user_release_sync_one(dsl_dataset_t *ds, nvlist_t *holds,
    dmu_tx_t *tx)
{
        dsl_pool_t *dp = ds->ds_dir->dd_pool;
        objset_t *mos = dp->dp_meta_objset;

        for (nvpair_t *pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
            pair = nvlist_next_nvpair(holds, pair)) {
                int error;
                const char *holdname = nvpair_name(pair);

                /* Remove temporary hold if one exists. */
                error = dsl_pool_user_release(dp, ds->ds_object, holdname, tx);
                VERIFY(error == 0 || error == ENOENT);

                VERIFY0(zap_remove(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
                    holdname, tx));
                ds->ds_userrefs--;

                spa_history_log_internal_ds(ds, "release", tx,
                    "tag=%s refs=%lld", holdname, (longlong_t)ds->ds_userrefs);
        }
}

static void
dsl_dataset_user_release_sync(void *arg, dmu_tx_t *tx)
{
        dsl_dataset_user_release_arg_t *ddura = arg;
        dsl_holdfunc_t *holdfunc = ddura->ddura_holdfunc;
        dsl_pool_t *dp = dmu_tx_pool(tx);

        ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));

        for (nvpair_t *pair = nvlist_next_nvpair(ddura->ddura_chkholds, NULL);
            pair != NULL; pair = nvlist_next_nvpair(ddura->ddura_chkholds,
            pair)) {
                dsl_dataset_t *ds;
                const char *name = nvpair_name(pair);

                VERIFY0(holdfunc(dp, name, FTAG, &ds));

                dsl_dataset_user_release_sync_one(ds,
                    fnvpair_value_nvlist(pair), tx);
                if (nvlist_exists(ddura->ddura_todelete, name)) {
                        ASSERT(ds->ds_userrefs == 0 &&
                            dsl_dataset_phys(ds)->ds_num_children == 1 &&
                            DS_IS_DEFER_DESTROY(ds));
                        dsl_destroy_snapshot_sync_impl(ds, B_FALSE, tx);
                }
                dsl_dataset_rele(ds, FTAG);
        }
}

/*
 * The full semantics of this function are described in the comment above
 * lzc_release().
 *
 * To summarize:
 * Releases holds specified in the nvl holds.
 *
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 *
 * If tmpdp is not NULL the names for holds should be the dsobjs of snapshots,
 * otherwise they should be the names of snapshots.
 *
 * As a release may cause snapshots to be destroyed this tries to ensure they
 * aren't mounted.
 *
 * The release of non-existent holds is skipped.
 *
 * At least one hold must have been released for this function to succeed
 * and return 0.
 */
static int
dsl_dataset_user_release_impl(nvlist_t *holds, nvlist_t *errlist,
    dsl_pool_t *tmpdp)
{
        dsl_dataset_user_release_arg_t ddura;
        nvpair_t *pair;
        char *pool;
        int error;

        pair = nvlist_next_nvpair(holds, NULL);
        if (pair == NULL)
                return (0);

        /*
         * The release may cause snapshots to be destroyed; make sure they
         * are not mounted.
         */
        if (tmpdp != NULL) {
                /* Temporary holds are specified by dsobj string. */
                ddura.ddura_holdfunc = dsl_dataset_hold_obj_string;
                pool = spa_name(tmpdp->dp_spa);
#ifdef _KERNEL
                for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
                    pair = nvlist_next_nvpair(holds, pair)) {
                        dsl_dataset_t *ds;

                        dsl_pool_config_enter(tmpdp, FTAG);
                        error = dsl_dataset_hold_obj_string(tmpdp,
                            nvpair_name(pair), FTAG, &ds);
                        if (error == 0) {
                                char name[ZFS_MAX_DATASET_NAME_LEN];
                                dsl_dataset_name(ds, name);
                                dsl_pool_config_exit(tmpdp, FTAG);
                                dsl_dataset_rele(ds, FTAG);
                                (void) zfs_unmount_snap(name);
                        } else {
                                dsl_pool_config_exit(tmpdp, FTAG);
                        }
                }
#endif
        } else {
                /* Non-temporary holds are specified by name. */
                ddura.ddura_holdfunc = dsl_dataset_hold;
                pool = nvpair_name(pair);
#ifdef _KERNEL
                for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
                    pair = nvlist_next_nvpair(holds, pair)) {
                        (void) zfs_unmount_snap(nvpair_name(pair));
                }
#endif
        }

        ddura.ddura_holds = holds;
        ddura.ddura_errlist = errlist;
        VERIFY0(nvlist_alloc(&ddura.ddura_todelete, NV_UNIQUE_NAME,
            KM_SLEEP));
        VERIFY0(nvlist_alloc(&ddura.ddura_chkholds, NV_UNIQUE_NAME,
            KM_SLEEP));

        error = dsl_sync_task(pool, dsl_dataset_user_release_check,
            dsl_dataset_user_release_sync, &ddura, 0,
            ZFS_SPACE_CHECK_EXTRA_RESERVED);
        fnvlist_free(ddura.ddura_todelete);
        fnvlist_free(ddura.ddura_chkholds);

        return (error);
}

/*
 * holds is nvl of snapname -> { holdname, ... }
 * errlist will be filled in with snapname -> error
 */
int
dsl_dataset_user_release(nvlist_t *holds, nvlist_t *errlist)
{
        return (dsl_dataset_user_release_impl(holds, errlist, NULL));
}
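
/*
 * Illustrative sketch (not part of the build): shape of the nested "holds"
 * nvlist expected by dsl_dataset_user_release(). Hold tags are encoded as
 * boolean nvpair names, matching how holds_found is built above. The
 * snapshot and tag names are hypothetical.
 *
 *      nvlist_t *holds = fnvlist_alloc();
 *      nvlist_t *errlist = fnvlist_alloc();
 *      nvlist_t *tags = fnvlist_alloc();
 *
 *      // snapshot name -> { hold tag, ... }
 *      fnvlist_add_boolean(tags, "backup-job");
 *      fnvlist_add_boolean(tags, "replication");
 *      fnvlist_add_nvlist(holds, "tank/fs@snap1", tags);
 *      fnvlist_free(tags);
 *
 *      int error = dsl_dataset_user_release(holds, errlist);
 *
 *      fnvlist_free(holds);
 *      fnvlist_free(errlist);
 */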

/*
 * holds is nvl of snapdsobj -> { holdname, ... }
 */
void
dsl_dataset_user_release_tmp(struct dsl_pool *dp, nvlist_t *holds)
{
        ASSERT(dp != NULL);
        (void) dsl_dataset_user_release_impl(holds, NULL, dp);
}

int
dsl_dataset_get_holds(const char *dsname, nvlist_t *nvl)
{
        dsl_pool_t *dp;
        dsl_dataset_t *ds;
        int err;

        err = dsl_pool_hold(dsname, FTAG, &dp);
        if (err != 0)
                return (err);
        err = dsl_dataset_hold(dp, dsname, FTAG, &ds);
        if (err != 0) {
                dsl_pool_rele(dp, FTAG);
                return (err);
        }

        if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
                zap_attribute_t *za;
                zap_cursor_t zc;

                za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
                for (zap_cursor_init(&zc, ds->ds_dir->dd_pool->dp_meta_objset,
                    dsl_dataset_phys(ds)->ds_userrefs_obj);
                    zap_cursor_retrieve(&zc, za) == 0;
                    zap_cursor_advance(&zc)) {
                        fnvlist_add_uint64(nvl, za->za_name,
                            za->za_first_integer);
                }
                zap_cursor_fini(&zc);
                kmem_free(za, sizeof (zap_attribute_t));
        }
        dsl_dataset_rele(ds, FTAG);
        dsl_pool_rele(dp, FTAG);
        return (0);
}
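
/*
 * Illustrative sketch (not part of the build): consuming the nvlist filled
 * in by dsl_dataset_get_holds(). Each entry maps a hold tag to the UNIX
 * timestamp (seconds) recorded when the hold was created, as stored by
 * dsl_dataset_user_hold_sync_one_impl(). The dataset name is hypothetical.
 *
 *      nvlist_t *nvl = fnvlist_alloc();
 *      if (dsl_dataset_get_holds("tank/fs@snap1", nvl) == 0) {
 *              for (nvpair_t *pair = nvlist_next_nvpair(nvl, NULL);
 *                  pair != NULL; pair = nvlist_next_nvpair(nvl, pair)) {
 *                      // nvpair name = hold tag, value = creation time
 *                      uint64_t when = fnvpair_value_uint64(pair);
 *                      zfs_dbgmsg("hold %s created at %llu",
 *                          nvpair_name(pair), (u_longlong_t)when);
 *              }
 *      }
 *      fnvlist_free(nvl);
 */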