/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 * Copyright (c) 2013 Steven Hartland. All rights reserved.
 */

/*
 * LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
 * It has the following characteristics:
 *
 * - Thread Safe. libzfs_core is accessible concurrently from multiple
 * threads. This is accomplished primarily by avoiding global data
 * (e.g. caching). Since it's thread-safe, there is no reason for a
 * process to have multiple libzfs "instances". Therefore, we store
 * our few pieces of data (e.g. the file descriptor) in global
 * variables. The fd is reference-counted so that the libzfs_core
 * library can be "initialized" multiple times (e.g. by different
 * consumers within the same process).
 *
 * - Committed Interface. The libzfs_core interface will be committed,
 * therefore consumers can compile against it and be confident that
 * their code will continue to work on future releases of this code.
 * Currently, the interface is Evolving (not Committed), but we intend
 * to commit to it once it is more complete and we determine that it
 * meets the needs of all consumers.
 *
 * - Programmatic Error Handling. libzfs_core communicates errors with
 * defined error numbers, and doesn't print anything to stdout/stderr.
 *
 * - Thin Layer. libzfs_core is a thin layer, marshaling arguments
 * to/from the kernel ioctls. There is generally a 1:1 correspondence
 * between libzfs_core functions and ioctls to /dev/zfs.
 *
 * - Clear Atomicity. Because libzfs_core functions are generally 1:1
 * with kernel ioctls, and kernel ioctls are generally atomic, each
 * libzfs_core function is atomic. For example, creating multiple
 * snapshots with a single call to lzc_snapshot() is atomic -- it
 * can't fail with only some of the requested snapshots created, even
 * in the event of power loss or system crash.
 *
 * - Continued libzfs Support. Some higher-level operations (e.g.
 * support for "zfs send -R") are too complicated to fit the scope of
 * libzfs_core. This functionality will continue to live in libzfs.
 * Where appropriate, libzfs will use the underlying atomic operations
 * of libzfs_core. For example, libzfs may implement "zfs send -R |
 * zfs receive" by using individual "send one snapshot", rename,
 * destroy, and "receive one snapshot" operations in libzfs_core.
 * /sbin/zfs and /sbin/zpool will link with both libzfs and
 * libzfs_core. Other consumers should aim to use only libzfs_core,
 * since that will be the supported, stable interface going forward.
 */

#include <libzfs_core.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/nvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>

static int g_fd;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;

int
libzfs_core_init(void)
{
	(void) pthread_mutex_lock(&g_lock);
	if (g_refcount == 0) {
		g_fd = open("/dev/zfs", O_RDWR);
		if (g_fd < 0) {
			(void) pthread_mutex_unlock(&g_lock);
			return (errno);
		}
	}
	g_refcount++;
	(void) pthread_mutex_unlock(&g_lock);
	return (0);
}

void
libzfs_core_fini(void)
{
	(void) pthread_mutex_lock(&g_lock);
	ASSERT3S(g_refcount, >, 0);
	g_refcount--;
	if (g_refcount == 0)
		(void) close(g_fd);
	(void) pthread_mutex_unlock(&g_lock);
}

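/*
 * Example (illustrative sketch, not part of the library): typical consumer
 * usage of the init/fini pair. Because the /dev/zfs fd is reference
 * counted, nested init/fini calls by different consumers in the same
 * process are safe.
 *
 *	int err = libzfs_core_init();
 *	if (err != 0)
 *		return (err);
 *	... call lzc_*() functions ...
 *	libzfs_core_fini();
 */
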
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
    nvlist_t *source, nvlist_t **resultp)
{
	zfs_cmd_t zc = {"\0"};
	int error = 0;
	char *packed;
	size_t size;

	ASSERT3S(g_refcount, >, 0);

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));

	packed = fnvlist_pack(source, &size);
	zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
	zc.zc_nvlist_src_size = size;

	if (resultp != NULL) {
		*resultp = NULL;
		zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);
		if (zc.zc_nvlist_dst == (uint64_t)0) {
			error = ENOMEM;
			goto out;
		}
	}

	while (ioctl(g_fd, ioc, &zc) != 0) {
		if (errno == ENOMEM && resultp != NULL) {
			free((void *)(uintptr_t)zc.zc_nvlist_dst);
			zc.zc_nvlist_dst_size *= 2;
			zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
			    malloc(zc.zc_nvlist_dst_size);
			if (zc.zc_nvlist_dst == (uint64_t)0) {
				error = ENOMEM;
				goto out;
			}
		} else {
			error = errno;
			break;
		}
	}
	if (zc.zc_nvlist_dst_filled) {
		*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
		    zc.zc_nvlist_dst_size);
	}

out:
	fnvlist_pack_free(packed, size);
	free((void *)(uintptr_t)zc.zc_nvlist_dst);
	return (error);
}

int
lzc_create(const char *fsname, dmu_objset_type_t type, nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_int32(args, "type", type);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

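/*
 * Example (illustrative sketch): creating a filesystem with one property
 * set at creation time. The dataset name and property are hypothetical;
 * property value types must match what the kernel expects.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	int err;
 *
 *	fnvlist_add_string(props, "mountpoint", "/export/data");
 *	err = lzc_create("tank/data", DMU_OST_ZFS, props);
 *	nvlist_free(props);
 */
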
int
lzc_clone(const char *fsname, const char *origin,
    nvlist_t *props)
{
	int error;
	nvlist_t *args = fnvlist_alloc();
	fnvlist_add_string(args, "origin", origin);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);
	error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
	nvlist_free(args);
	return (error);
}

/*
 * Creates snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be created.
 * They must all be in the same pool.
 *
 * The props nvlist is properties to set. Currently only user properties
 * are supported. { user:prop_name -> string value }
 *
 * The returned results nvlist will have an entry for each snapshot that
 * failed. The value will be the (int32) error code.
 *
 * The return value will be 0 if all snapshots were created, otherwise it will
 * be the errno of an (unspecified) snapshot that failed.
 */
int
lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	*errlist = NULL;

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (props != NULL)
		fnvlist_add_nvlist(args, "props", props);

	error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
	nvlist_free(args);

	return (error);
}

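/*
 * Example (illustrative sketch): atomically snapshotting two datasets in
 * the same pool. Snapshot names are given as nvlist keys with no values;
 * the names below are hypothetical.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int err;
 *
 *	fnvlist_add_boolean(snaps, "tank/fs@backup");
 *	fnvlist_add_boolean(snaps, "tank/vol@backup");
 *	err = lzc_snapshot(snaps, NULL, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */
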
/*
 * Destroys snapshots.
 *
 * The keys in the snaps nvlist are the snapshots to be destroyed.
 * They must all be in the same pool.
 *
 * Snapshots that do not exist will be silently ignored.
 *
 * If 'defer' is not set, and a snapshot has user holds or clones, the
 * destroy operation will fail and none of the snapshots will be
 * destroyed.
 *
 * If 'defer' is set, and a snapshot has user holds or clones, it will be
 * marked for deferred destruction, and will be destroyed when the last hold
 * or clone is removed/destroyed.
 *
 * The return value will be 0 if all snapshots were destroyed (or marked for
 * later destruction if 'defer' is set) or didn't exist to begin with.
 *
 * Otherwise the return value will be the errno of an (unspecified) snapshot
 * that failed, no snapshots will be destroyed, and the errlist will have an
 * entry for each snapshot that failed. The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
{
	nvpair_t *elem;
	nvlist_t *args;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(snaps, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "snaps", snaps);
	if (defer)
		fnvlist_add_boolean(args, "defer");

	error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
	nvlist_free(args);

	return (error);
}

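/*
 * Example (illustrative sketch): deferred destruction of the snapshots
 * created in the previous example. Held or cloned snapshots are marked
 * for later destruction rather than failing the whole request.
 *
 *	nvlist_t *snaps = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int err;
 *
 *	fnvlist_add_boolean(snaps, "tank/fs@backup");
 *	fnvlist_add_boolean(snaps, "tank/vol@backup");
 *	err = lzc_destroy_snaps(snaps, B_TRUE, &errlist);
 *	nvlist_free(snaps);
 *	nvlist_free(errlist);
 */
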
int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
    uint64_t *usedp)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;
	char fs[ZFS_MAX_DATASET_NAME_LEN];
	char *atp;

	/* determine the fs name */
	(void) strlcpy(fs, firstsnap, sizeof (fs));
	atp = strchr(fs, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	args = fnvlist_alloc();
	fnvlist_add_string(args, "firstsnap", firstsnap);

	err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
	nvlist_free(args);
	if (err == 0)
		*usedp = fnvlist_lookup_uint64(result, "used");
	fnvlist_free(result);

	return (err);
}

boolean_t
lzc_exists(const char *dataset)
{
	/*
	 * The objset_stats ioctl is still legacy, so we need to construct our
	 * own zfs_cmd_t rather than using lzc_ioctl().
	 */
	zfs_cmd_t zc = {"\0"};

	(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
	return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
}

/*
 * Create "user holds" on snapshots. If there is a hold on a snapshot,
 * the snapshot cannot be destroyed. (However, it can be marked for deletion
 * by lzc_destroy_snaps(defer=B_TRUE).)
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is the name of the hold (string type).
 *
 * If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
 * In this case, when the cleanup_fd is closed (including on process
 * termination), the holds will be released. If the system is shut down
 * uncleanly, the holds will be released when the pool is next opened
 * or imported.
 *
 * Holds for snapshots which don't exist will be skipped and have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if all holds, for snapshots that existed,
 * were successfully created.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed and no holds will be created.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * (name = snapshot), with its value being the error code (int32).
 */
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvlist_t *args;
	nvpair_t *elem;
	int error;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	args = fnvlist_alloc();
	fnvlist_add_nvlist(args, "holds", holds);
	if (cleanup_fd != -1)
		fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);

	error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
	nvlist_free(args);
	return (error);
}

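/*
 * Example (illustrative sketch): placing a hold named "keep" on a snapshot
 * with no cleanup fd, so the hold persists until explicitly released.
 * Dataset and hold names are hypothetical.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int err;
 *
 *	fnvlist_add_string(holds, "tank/fs@backup", "keep");
 *	err = lzc_hold(holds, -1, &errlist);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 */
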
/*
 * Release "user holds" on snapshots. If the snapshot has been marked for
 * deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
 * any clones, and all the user holds are removed, then the snapshot will be
 * destroyed.
 *
 * The keys in the nvlist are snapshot names.
 * The snapshots must all be in the same pool.
 * The value is an nvlist whose keys are the holds to remove.
 *
 * Holds which failed to release because they didn't exist will have an entry
 * added to errlist, but will not cause an overall failure.
 *
 * The return value will be 0 if the nvlist of holds was empty or if all holds
 * that existed were successfully removed.
 *
 * Otherwise the return value will be the errno of an (unspecified) hold that
 * failed to release and no holds will be released.
 *
 * In all cases the errlist will have an entry for each hold that failed
 * to release.
 */
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
	char pool[ZFS_MAX_DATASET_NAME_LEN];
	nvpair_t *elem;

	/* determine the pool name */
	elem = nvlist_next_nvpair(holds, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/@")] = '\0';

	return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}

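/*
 * Example (illustrative sketch): releasing the "keep" hold placed in the
 * lzc_hold() example. Each snapshot maps to a nested nvlist whose keys
 * name the holds to remove.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *tags = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int err;
 *
 *	fnvlist_add_boolean(tags, "keep");
 *	fnvlist_add_nvlist(holds, "tank/fs@backup", tags);
 *	err = lzc_release(holds, &errlist);
 *	nvlist_free(tags);
 *	nvlist_free(holds);
 *	nvlist_free(errlist);
 */
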
/*
 * Retrieve list of user holds on the specified snapshot.
 *
 * On success, *holdsp will be set to an nvlist which the caller must free.
 * The keys are the names of the holds, and the value is the creation time
 * of the hold (uint64) in seconds since the epoch.
 */
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
	int error;
	nvlist_t *innvl = fnvlist_alloc();
	error = lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, innvl, holdsp);
	fnvlist_free(innvl);
	return (error);
}

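/*
 * Example (illustrative sketch): listing the holds on a snapshot and
 * walking the returned nvlist of (hold name -> creation time) pairs.
 *
 *	nvlist_t *holds = NULL;
 *	nvpair_t *elem;
 *
 *	if (lzc_get_holds("tank/fs@backup", &holds) == 0) {
 *		for (elem = nvlist_next_nvpair(holds, NULL); elem != NULL;
 *		    elem = nvlist_next_nvpair(holds, elem)) {
 *			(void) printf("%s\n", nvpair_name(elem));
 *		}
 *		nvlist_free(holds);
 *	}
 */
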
/*
 * Generate a zfs send stream for the specified snapshot and write it to
 * the specified file descriptor.
 *
 * "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
 *
 * If "from" is NULL, a full (non-incremental) stream will be sent.
 * If "from" is non-NULL, it must be the full name of a snapshot or
 * bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
 * "pool/fs#earlier_bmark"). If non-NULL, the specified snapshot or
 * bookmark must represent an earlier point in the history of "snapname".
 * It can be an earlier snapshot in the same filesystem or zvol as "snapname",
 * or it can be the origin of "snapname"'s filesystem, or an earlier
 * snapshot in the origin, etc.
 *
 * "fd" is the file descriptor to write the send stream to.
 *
 * If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
 * to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
 * records with drr_blksz > 128K.
 *
 * If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
 * to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
 * which the receiving system must support (as indicated by support
 * for the "embedded_data" feature).
 */
int
lzc_send(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags)
{
	return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}

int
lzc_send_resume(const char *snapname, const char *from, int fd,
    enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
{
	nvlist_t *args;
	int err;

	args = fnvlist_alloc();
	fnvlist_add_int32(args, "fd", fd);
	if (from != NULL)
		fnvlist_add_string(args, "fromsnap", from);
	if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
		fnvlist_add_boolean(args, "largeblockok");
	if (flags & LZC_SEND_FLAG_EMBED_DATA)
		fnvlist_add_boolean(args, "embedok");
	if (resumeobj != 0 || resumeoff != 0) {
		fnvlist_add_uint64(args, "resume_object", resumeobj);
		fnvlist_add_uint64(args, "resume_offset", resumeoff);
	}
	err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
	nvlist_free(args);
	return (err);
}

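/*
 * Example (illustrative sketch): writing an incremental send stream from
 * "@yesterday" to "@today" into a file. Names and the output path are
 * hypothetical; a pipe or socket fd works just as well.
 *
 *	int fd = open("/backup/fs.zstream", O_WRONLY | O_CREAT | O_TRUNC,
 *	    0644);
 *	int err;
 *
 *	if (fd < 0)
 *		return (errno);
 *	err = lzc_send("tank/fs@today", "tank/fs@yesterday", fd, 0);
 *	(void) close(fd);
 */
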
/*
 * "from" can be NULL, a snapshot, or a bookmark.
 *
 * If from is NULL, a full (non-incremental) stream will be estimated. This
 * is calculated very efficiently.
 *
 * If from is a snapshot, lzc_send_space uses the deadlists attached to
 * each snapshot to efficiently estimate the stream size.
 *
 * If from is a bookmark, the indirect blocks in the destination snapshot
 * are traversed, looking for blocks with a birth time after the creation TXG
 * of the snapshot that the bookmark was created from. This will result in
 * significantly more I/O and be less efficient than a send space estimation
 * on an equivalent snapshot.
 */
int
lzc_send_space(const char *snapname, const char *from, uint64_t *spacep)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	if (from != NULL)
		fnvlist_add_string(args, "from", from);
	err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
	nvlist_free(args);
	if (err == 0)
		*spacep = fnvlist_lookup_uint64(result, "space");
	nvlist_free(result);
	return (err);
}

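/*
 * Example (illustrative sketch): estimating the size of the incremental
 * stream from the lzc_send() example before actually sending it.
 *
 *	uint64_t space = 0;
 *	int err;
 *
 *	err = lzc_send_space("tank/fs@today", "tank/fs@yesterday", &space);
 *	if (err == 0)
 *		(void) printf("estimated stream size: %llu bytes\n",
 *		    (u_longlong_t)space);
 */
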
static int
recv_read(int fd, void *buf, int ilen)
{
	char *cp = buf;
	int rv;
	int len = ilen;

	do {
		rv = read(fd, cp, len);
		cp += rv;
		len -= rv;
	} while (rv > 0);

	if (rv < 0 || len != 0)
		return (EIO);

	return (0);
}

/*
 * Linux adds ZFS_IOC_RECV_NEW for resumable streams and preserves the legacy
 * ZFS_IOC_RECV user/kernel interface. The new interface supports all stream
 * options but is currently only used for resumable streams. This way updated
 * user space utilities will interoperate with older kernel modules.
 *
 * Non-Linux OpenZFS platforms have opted to modify the legacy interface.
 */
static int
recv_impl(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, boolean_t resumable, int input_fd,
    const dmu_replay_record_t *begin_record, int cleanup_fd,
    uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
    nvlist_t **errors)
{
	dmu_replay_record_t drr;
	char fsname[MAXPATHLEN];
	char *atp;
	int error;

	/* Set 'fsname' to the name of containing filesystem */
	(void) strlcpy(fsname, snapname, sizeof (fsname));
	atp = strchr(fsname, '@');
	if (atp == NULL)
		return (EINVAL);
	*atp = '\0';

	/* If the fs does not exist, try its parent. */
	if (!lzc_exists(fsname)) {
		char *slashp = strrchr(fsname, '/');
		if (slashp == NULL)
			return (ENOENT);
		*slashp = '\0';
	}

	/*
	 * The begin_record is normally a non-byteswapped BEGIN record.
	 * For resumable streams it may be set to any non-byteswapped
	 * dmu_replay_record_t.
	 */
	if (begin_record == NULL) {
		error = recv_read(input_fd, &drr, sizeof (drr));
		if (error != 0)
			return (error);
	} else {
		drr = *begin_record;
	}

	if (resumable) {
		nvlist_t *outnvl = NULL;
		nvlist_t *innvl = fnvlist_alloc();

		fnvlist_add_string(innvl, "snapname", snapname);

		if (props != NULL)
			fnvlist_add_nvlist(innvl, "props", props);

		if (origin != NULL && strlen(origin))
			fnvlist_add_string(innvl, "origin", origin);

		fnvlist_add_byte_array(innvl, "begin_record",
		    (uchar_t *)&drr, sizeof (drr));

		fnvlist_add_int32(innvl, "input_fd", input_fd);

		if (force)
			fnvlist_add_boolean(innvl, "force");

		if (resumable)
			fnvlist_add_boolean(innvl, "resumable");

		if (cleanup_fd >= 0)
			fnvlist_add_int32(innvl, "cleanup_fd", cleanup_fd);

		if (action_handle != NULL)
			fnvlist_add_uint64(innvl, "action_handle",
			    *action_handle);

		error = lzc_ioctl(ZFS_IOC_RECV_NEW, fsname, innvl, &outnvl);

		if (error == 0 && read_bytes != NULL)
			error = nvlist_lookup_uint64(outnvl, "read_bytes",
			    read_bytes);

		if (error == 0 && errflags != NULL)
			error = nvlist_lookup_uint64(outnvl, "error_flags",
			    errflags);

		if (error == 0 && action_handle != NULL)
			error = nvlist_lookup_uint64(outnvl, "action_handle",
			    action_handle);

		if (error == 0 && errors != NULL) {
			nvlist_t *nvl;
			error = nvlist_lookup_nvlist(outnvl, "errors", &nvl);
			if (error == 0)
				*errors = fnvlist_dup(nvl);
		}

		fnvlist_free(innvl);
		fnvlist_free(outnvl);
	} else {
		zfs_cmd_t zc = {"\0"};
		char *packed = NULL;
		size_t size;

		ASSERT3S(g_refcount, >, 0);

		(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
		(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));

		if (props != NULL) {
			packed = fnvlist_pack(props, &size);
			zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
			zc.zc_nvlist_src_size = size;
		}

		if (origin != NULL)
			(void) strlcpy(zc.zc_string, origin,
			    sizeof (zc.zc_string));

		ASSERT3S(drr.drr_type, ==, DRR_BEGIN);
		zc.zc_begin_record = drr.drr_u.drr_begin;
		zc.zc_guid = force;
		zc.zc_cookie = input_fd;
		zc.zc_cleanup_fd = -1;
		zc.zc_action_handle = 0;

		if (cleanup_fd >= 0)
			zc.zc_cleanup_fd = cleanup_fd;

		if (action_handle != NULL)
			zc.zc_action_handle = *action_handle;

		zc.zc_nvlist_dst_size = 128 * 1024;
		zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
		    malloc(zc.zc_nvlist_dst_size);

		error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
		if (error != 0) {
			error = errno;
		} else {
			if (read_bytes != NULL)
				*read_bytes = zc.zc_cookie;

			if (errflags != NULL)
				*errflags = zc.zc_obj;

			if (action_handle != NULL)
				*action_handle = zc.zc_action_handle;

			if (errors != NULL)
				VERIFY0(nvlist_unpack(
				    (void *)(uintptr_t)zc.zc_nvlist_dst,
				    zc.zc_nvlist_dst_size, errors, KM_SLEEP));
		}

		if (packed != NULL)
			fnvlist_pack_free(packed, size);
		free((void *)(uintptr_t)zc.zc_nvlist_dst);
	}

	return (error);
}

/*
 * The simplest receive case: receive from the specified fd, creating the
 * specified snapshot. Apply the specified properties as "received" properties
 * (which can be overridden by locally-set properties). If the stream is a
 * clone, its origin snapshot must be specified by 'origin'. The 'force'
 * flag will cause the target filesystem to be rolled back or destroyed if
 * necessary to receive.
 *
 * Return 0 on success or an errno on failure.
 *
 * Note: this interface does not work on dedup'd streams
 * (those with DMU_BACKUP_FEATURE_DEDUP).
 */
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_FALSE, fd,
	    NULL, -1, NULL, NULL, NULL, NULL));
}

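/*
 * Example (illustrative sketch): receiving the stream written by the
 * lzc_send() example into a new snapshot, with no received properties,
 * no clone origin, and no forced rollback.
 *
 *	int fd = open("/backup/fs.zstream", O_RDONLY);
 *	int err;
 *
 *	if (fd < 0)
 *		return (errno);
 *	err = lzc_receive("tank/copy@today", NULL, NULL, B_FALSE, fd);
 *	(void) close(fd);
 */
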
/*
 * Like lzc_receive, but if the receive fails due to premature stream
 * termination, the intermediate state will be preserved on disk. In this
 * case, ECKSUM will be returned. The receive may subsequently be resumed
 * with a resuming send stream generated by lzc_send_resume().
 */
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
    boolean_t force, int fd)
{
	return (recv_impl(snapname, props, origin, force, B_TRUE, fd,
	    NULL, -1, NULL, NULL, NULL, NULL));
}

/*
 * Like lzc_receive, but allows the caller to read the begin record and then to
 * pass it in. That could be useful if the caller wants to derive, for example,
 * the snapname or the origin parameters based on the information contained in
 * the begin record.
 * The begin record must be in its original form as read from the stream,
 * in other words, it should not be byteswapped.
 *
 * The 'resumable' parameter allows obtaining the same behavior as with
 * lzc_receive_resumable.
 */
int
lzc_receive_with_header(const char *snapname, nvlist_t *props,
    const char *origin, boolean_t force, boolean_t resumable, int fd,
    const dmu_replay_record_t *begin_record)
{
	if (begin_record == NULL)
		return (EINVAL);
	return (recv_impl(snapname, props, origin, force, resumable, fd,
	    begin_record, -1, NULL, NULL, NULL, NULL));
}

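/*
 * Example (illustrative sketch): reading the non-byteswapped begin record
 * from the stream before handing the rest of the fd to the library, e.g.
 * to inspect the header before choosing a target snapshot name.
 *
 *	dmu_replay_record_t drr;
 *	int fd = open("/backup/fs.zstream", O_RDONLY);
 *	int err;
 *
 *	if (fd < 0)
 *		return (errno);
 *	if (read(fd, &drr, sizeof (drr)) != sizeof (drr))
 *		return (EIO);
 *	... examine drr, e.g. to choose a snapshot name ...
 *	err = lzc_receive_with_header("tank/copy@today", NULL, NULL,
 *	    B_FALSE, B_FALSE, fd, &drr);
 *	(void) close(fd);
 */
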
/*
 * Like lzc_receive, but allows the caller to pass all supported arguments
 * and retrieve all values returned. The only additional input parameter
 * is 'cleanup_fd' which is used to set a cleanup-on-exit file descriptor.
 *
 * The following parameters all provide return values. Several may be set
 * in the failure case and will contain additional information.
 *
 * The 'read_bytes' value will be set to the total number of bytes read.
 *
 * The 'errflags' value will contain zprop_errflags_t flags which are
 * used to describe any failures.
 *
 * The 'action_handle' is used to pass the handle for this guid/ds mapping.
 * It should be set to zero on the first call; on success it will contain an
 * updated handle, which should be passed in subsequent calls.
 *
 * The 'errors' nvlist contains an entry for each unapplied received
 * property. Callers are responsible for freeing this nvlist.
 */
int lzc_receive_one(const char *snapname, nvlist_t *props,
    const char *origin, boolean_t force, boolean_t resumable, int input_fd,
    const dmu_replay_record_t *begin_record, int cleanup_fd,
    uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
    nvlist_t **errors)
{
	return (recv_impl(snapname, props, origin, force, resumable,
	    input_fd, begin_record, cleanup_fd, read_bytes, errflags,
	    action_handle, errors));
}

/*
 * Roll back this filesystem or volume to its most recent snapshot.
 * If snapnamebuf is not NULL, it will be filled in with the name
 * of the most recent snapshot.
 *
 * Return 0 on success or an errno on failure.
 */
int
lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
{
	nvlist_t *args;
	nvlist_t *result;
	int err;

	args = fnvlist_alloc();
	err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
	nvlist_free(args);
	if (err == 0 && snapnamebuf != NULL) {
		const char *snapname = fnvlist_lookup_string(result, "target");
		(void) strlcpy(snapnamebuf, snapname, snapnamelen);
	}
	return (err);
}

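/*
 * Example (illustrative sketch): rolling a filesystem back to its latest
 * snapshot and reporting which snapshot it was rolled back to.
 *
 *	char snapname[ZFS_MAX_DATASET_NAME_LEN];
 *	int err;
 *
 *	err = lzc_rollback("tank/fs", snapname, sizeof (snapname));
 *	if (err == 0)
 *		(void) printf("rolled back to %s\n", snapname);
 */
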
/*
 * Creates bookmarks.
 *
 * The bookmarks nvlist maps from name of the bookmark (e.g. "pool/fs#bmark")
 * to the name of the snapshot (e.g. "pool/fs@snap"). All the bookmarks and
 * snapshots must be in the same pool.
 *
 * The returned results nvlist will have an entry for each bookmark that
 * failed. The value will be the (int32) error code.
 *
 * The return value will be 0 if all bookmarks were created, otherwise it will
 * be the errno of an (undetermined) bookmark that failed.
 */
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bookmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);

	return (error);
}

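/*
 * Example (illustrative sketch): bookmarking a snapshot so that it can
 * serve as an incremental send source after the snapshot itself is
 * destroyed. Names are hypothetical.
 *
 *	nvlist_t *bmarks = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	int err;
 *
 *	fnvlist_add_string(bmarks, "tank/fs#monday", "tank/fs@monday");
 *	err = lzc_bookmark(bmarks, &errlist);
 *	nvlist_free(bmarks);
 *	nvlist_free(errlist);
 */
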
/*
 * Retrieve bookmarks.
 *
 * Retrieve the list of bookmarks for the given file system. The props
 * parameter is an nvlist of property names (with no values) that will be
 * returned for each bookmark.
 *
 * The following are valid properties on bookmarks, all of which are numbers
 * (represented as uint64 in the nvlist):
 *
 * "guid" - globally unique identifier of the snapshot it refers to
 * "createtxg" - txg when the snapshot it refers to was created
 * "creation" - timestamp when the snapshot it refers to was created
 *
 * The format of the returned nvlist is as follows:
 * <short name of bookmark> -> {
 *     <name of property> -> {
 *         "value" -> uint64
 *     }
 * }
 */
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
	return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}

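/*
 * Example (illustrative sketch): listing the bookmarks of a filesystem and
 * requesting two of the documented properties for each one.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	nvlist_t *bmarks = NULL;
 *	int err;
 *
 *	fnvlist_add_boolean(props, "guid");
 *	fnvlist_add_boolean(props, "createtxg");
 *	err = lzc_get_bookmarks("tank/fs", props, &bmarks);
 *	nvlist_free(props);
 *	nvlist_free(bmarks);
 */
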
/*
 * Destroys bookmarks.
 *
 * The keys in the bmarks nvlist are the bookmarks to be destroyed.
 * They must all be in the same pool. Bookmarks are specified as
 * <fs>#<bmark>.
 *
 * Bookmarks that do not exist will be silently ignored.
 *
 * The return value will be 0 if all bookmarks that existed were destroyed.
 *
 * Otherwise the return value will be the errno of an (undetermined) bookmark
 * that failed, no bookmarks will be destroyed, and the errlist will have an
 * entry for each bookmark that failed. The value in the errlist will be
 * the (int32) error code.
 */
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
	nvpair_t *elem;
	int error;
	char pool[ZFS_MAX_DATASET_NAME_LEN];

	/* determine the pool name */
	elem = nvlist_next_nvpair(bmarks, NULL);
	if (elem == NULL)
		return (0);
	(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
	pool[strcspn(pool, "/#")] = '\0';

	error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);

	return (error);
}