* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
+ * Copyright (c) 2019, Klara Inc.
+ * Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
+#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
+#include <sys/objlist.h>
+#ifdef _KERNEL
+#include <sys/zfs_vfsops.h>
+#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
-int zfs_send_corrupt_data = B_FALSE;
-int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
-int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
-/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
-int zfs_send_set_freerecords_bit = B_TRUE;
-
-static char *dmu_recv_tag = "dmu_recv_tag";
-const char *recv_clone_name = "%recv";
+static int zfs_send_corrupt_data = B_FALSE;
+/*
+ * This tunable controls the amount of data (measured in bytes) that will be
+ * prefetched by zfs send. If the main thread is blocking on reads that haven't
+ * completed, this variable might need to be increased. If instead the main
+ * thread is issuing new reads because the prefetches have fallen out of the
+ * cache, this may need to be decreased.
+ */
+static uint_t zfs_send_queue_length = SPA_MAXBLOCKSIZE;
+/*
+ * This tunable controls the length of the queues that zfs send worker threads
+ * use to communicate. If the send_main_thread is blocking on these queues,
+ * this variable may need to be increased. If there is a significant slowdown
+ * at the start of a send as these threads consume all the available IO
+ * resources, this variable may need to be decreased.
+ */
+static uint_t zfs_send_no_prefetch_queue_length = 1024 * 1024;
+/*
+ * These tunables control the fill fraction of the queues used by zfs send.
+ * The fill fraction controls the frequency with which threads have to be
+ * cv_signaled. If a lot of CPU time is being spent on cv_signal, then these
+ * should be tuned down. If the queues empty before the signaled thread can
+ * catch up, then these should be tuned up.
+ */
+static uint_t zfs_send_queue_ff = 20;
+static uint_t zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
-unsigned long zfs_override_estimate_recordsize = 0;
+static uint_t zfs_override_estimate_recordsize = 0;
+
+/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
+static const boolean_t zfs_send_set_freerecords_bit = B_TRUE;
-#define BP_SPAN(datablkszsec, indblkshift, level) \
- (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
- (level) * (indblkshift - SPA_BLKPTRSHIFT)))
+/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
+static int zfs_send_unmodified_spill_blocks = B_TRUE;
-static void byteswap_record(dmu_replay_record_t *drr);
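+/*
+ * Return B_TRUE and set *c to a * b, or return B_FALSE if the 64-bit
+ * multiplication would overflow (in which case *c is left untouched).
+ */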
+static inline boolean_t
+overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
+{
+ uint64_t temp = a * b;
+ if (b != 0 && temp / b != a)
+ return (B_FALSE);
+ *c = temp;
+ return (B_TRUE);
+}
struct send_thread_arg {
bqueue_t q;
- dsl_dataset_t *ds; /* Dataset to traverse */
+ objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
+ uint64_t *num_blocks_visited;
+};
+
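+/*
+ * Arguments for a thread that iterates over a redaction list and enqueues
+ * the ranges it describes.
+ */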
+struct redact_list_thread_arg {
+ boolean_t cancel;
+ bqueue_t q;
+ zbookmark_phys_t resume;
+ redaction_list_t *rl;
+ boolean_t mark_redact;
+ int error_code;
+ uint64_t *num_blocks_visited;
+};
+
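+/*
+ * Arguments for the thread that merges the ranges produced by the
+ * traversal and redaction-list threads into a single ordered stream.
+ */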
+struct send_merge_thread_arg {
+ bqueue_t q;
+ objset_t *os;
+ struct redact_list_thread_arg *from_arg;
+ struct send_thread_arg *to_arg;
+ struct redact_list_thread_arg *redact_arg;
+ int error;
+ boolean_t cancel;
};
-struct send_block_record {
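+/*
+ * A send_range describes one contiguous range of a single object in the
+ * stream; the type field selects which member of the sru union is valid.
+ */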
+struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
- blkptr_t bp;
- zbookmark_phys_t zb;
- uint8_t indblkshift;
- uint16_t datablkszsec;
+ uint64_t object;
+ uint64_t start_blkid;
+ uint64_t end_blkid;
bqueue_node_t ln;
+ enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
+ PREVIOUSLY_REDACTED} type;
+ union {
+ struct srd {
+ dmu_object_type_t obj_type;
+ uint32_t datablksz; /* logical size */
+ uint32_t datasz; /* payload size */
+ blkptr_t bp;
+ arc_buf_t *abuf;
+ abd_t *abd;
+ kmutex_t lock;
+ kcondvar_t cv;
+ boolean_t io_outstanding;
+ boolean_t io_compressed;
+ int io_err;
+ } data;
+ struct srh {
+ uint32_t datablksz;
+ } hole;
+ struct sro {
+ /*
+ * This is a pointer because embedding the dnode_phys_t
+ * in the struct would make these structures massively
+ * larger for all range types, and therefore much less
+ * memory efficient.
+ */
+ dnode_phys_t *dnp;
+ blkptr_t bp;
+ } object;
+ struct srr {
+ uint32_t datablksz;
+ } redact;
+ struct sror {
+ blkptr_t bp;
+ } object_range;
+ } sru;
};
-typedef struct dump_bytes_io {
- dmu_sendarg_t *dbi_dsp;
- void *dbi_buf;
- int dbi_len;
-} dump_bytes_io_t;
+/*
+ * The list of data whose inclusion in a send stream can be pending from
+ * one call to do_dump() to another. Multiple calls to dump_free(),
+ * dump_freeobjects(), and dump_redact() can be aggregated into a single
+ * DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
+ */
+typedef enum {
+ PENDING_NONE,
+ PENDING_FREE,
+ PENDING_FREEOBJECTS,
+ PENDING_REDACT
+} dmu_pendop_t;
+
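+/*
+ * The send cookie carries the per-stream state used while dumping records:
+ * the scratch record being assembled (dsc_drr), the output callbacks
+ * (dsc_dso), the running stream checksum (dsc_zc), the pending aggregation
+ * state (dsc_pending_op), and resume bookkeeping.
+ */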
+typedef struct dmu_send_cookie {
+ dmu_replay_record_t *dsc_drr;
+ dmu_send_outparams_t *dsc_dso;
+ offset_t *dsc_off;
+ objset_t *dsc_os;
+ zio_cksum_t dsc_zc;
+ uint64_t dsc_toguid;
+ uint64_t dsc_fromtxg;
+ int dsc_err;
+ dmu_pendop_t dsc_pending_op;
+ uint64_t dsc_featureflags;
+ uint64_t dsc_last_data_object;
+ uint64_t dsc_last_data_offset;
+ uint64_t dsc_resume_object;
+ uint64_t dsc_resume_offset;
+ boolean_t dsc_sent_begin;
+ boolean_t dsc_sent_end;
+} dmu_send_cookie_t;
+
+static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
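+
+/*
+ * Release a send_range. DATA ranges may have a read in flight; wait for it
+ * to complete before tearing down the buffers, condvar, and mutex.
+ */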
static void
-dump_bytes_cb(void *arg)
-{
- dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
- dmu_sendarg_t *dsp = dbi->dbi_dsp;
- dsl_dataset_t *ds = dmu_objset_ds(dsp->dsa_os);
- ssize_t resid; /* have to get resid to get detailed errno */
-
- /*
- * The code does not rely on len being a multiple of 8. We keep
- * this assertion because of the corresponding assertion in
- * receive_read(). Keeping this assertion ensures that we do not
- * inadvertently break backwards compatibility (causing the assertion
- * in receive_read() to trigger on old software). Newer feature flags
- * (such as raw send) may break this assertion since they were
- * introduced after the requirement was made obsolete.
- */
-
- ASSERT(dbi->dbi_len % 8 == 0 ||
- (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
-
- dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
- (caddr_t)dbi->dbi_buf, dbi->dbi_len,
- 0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
-
- mutex_enter(&ds->ds_sendstream_lock);
- *dsp->dsa_off += dbi->dbi_len;
- mutex_exit(&ds->ds_sendstream_lock);
-}
-
-static int
-dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
+range_free(struct send_range *range)
{
- dump_bytes_io_t dbi;
-
- dbi.dbi_dsp = dsp;
- dbi.dbi_buf = buf;
- dbi.dbi_len = len;
-
-#if defined(HAVE_LARGE_STACKS)
- dump_bytes_cb(&dbi);
-#else
- /*
- * The vn_rdwr() call is performed in a taskq to ensure that there is
- * always enough stack space to write safely to the target filesystem.
- * The ZIO_TYPE_FREE threads are used because there can be a lot of
- * them and they are used in vdev_file.c for a similar purpose.
- */
- spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
- ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
-#endif /* HAVE_LARGE_STACKS */
-
- return (dsp->dsa_err);
+ if (range->type == OBJECT) {
+ size_t size = sizeof (dnode_phys_t) *
+ (range->sru.object.dnp->dn_extra_slots + 1);
+ kmem_free(range->sru.object.dnp, size);
+ } else if (range->type == DATA) {
+ mutex_enter(&range->sru.data.lock);
+ while (range->sru.data.io_outstanding)
+ cv_wait(&range->sru.data.cv, &range->sru.data.lock);
+ if (range->sru.data.abd != NULL)
+ abd_free(range->sru.data.abd);
+ if (range->sru.data.abuf != NULL) {
+ arc_buf_destroy(range->sru.data.abuf,
+ &range->sru.data.abuf);
+ }
+ mutex_exit(&range->sru.data.lock);
+
+ cv_destroy(&range->sru.data.cv);
+ mutex_destroy(&range->sru.data.lock);
+ }
+ kmem_free(range, sizeof (*range));
}
/*
* up to the start of the checksum itself.
*/
static int
-dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
+dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
+ dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
- (void) fletcher_4_incremental_native(dsp->dsa_drr,
+ (void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
- &dsp->dsa_zc);
- if (dsp->dsa_drr->drr_type == DRR_BEGIN) {
- dsp->dsa_sent_begin = B_TRUE;
+ &dscp->dsc_zc);
+ if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
+ dscp->dsc_sent_begin = B_TRUE;
} else {
- ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
+ ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
- dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
+ dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
- if (dsp->dsa_drr->drr_type == DRR_END) {
- dsp->dsa_sent_end = B_TRUE;
+ if (dscp->dsc_drr->drr_type == DRR_END) {
+ dscp->dsc_sent_end = B_TRUE;
}
- (void) fletcher_4_incremental_native(&dsp->dsa_drr->
+ (void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
- sizeof (zio_cksum_t), &dsp->dsa_zc);
- if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
+ sizeof (zio_cksum_t), &dscp->dsc_zc);
+ *dscp->dsc_off += sizeof (dmu_replay_record_t);
+ dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
+ sizeof (dmu_replay_record_t), dso->dso_arg);
+ if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
- (void) fletcher_4_incremental_native(payload, payload_len,
- &dsp->dsa_zc);
- if (dump_bytes(dsp, payload, payload_len) != 0)
+ *dscp->dsc_off += payload_len;
+ /*
+ * payload is null when dso_dryrun == B_TRUE (i.e. when we're
+ * doing a send size calculation)
+ */
+ if (payload != NULL) {
+ (void) fletcher_4_incremental_native(
+ payload, payload_len, &dscp->dsc_zc);
+ }
+
+ /*
+ * The code does not rely on this (len being a multiple of 8).
+ * We keep this assertion because of the corresponding assertion
+ * in receive_read(). Keeping this assertion ensures that we do
+ * not inadvertently break backwards compatibility (causing the
+ * assertion in receive_read() to trigger on old software).
+ *
+ * Raw sends cannot be received on old software, and so can
+ * bypass this assertion.
+ */
+
+ ASSERT((payload_len % 8 == 0) ||
+ (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
+
+ dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
+ payload_len, dso->dso_arg);
+ if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
* and freeobject records that were generated on the source.
*/
static int
-dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
+dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
- struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
+ struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* another way to assert that the one-record constraint is still
* satisfied.
*/
- ASSERT(object > dsp->dsa_last_data_object ||
- (object == dsp->dsa_last_data_object &&
- offset > dsp->dsa_last_data_offset));
+ ASSERT(object > dscp->dsc_last_data_object ||
+ (object == dscp->dsc_last_data_object &&
+ offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
- * aggregated with other DRR_FREEOBJECTS records.
+ * aggregated with other DRR_FREEOBJECTS records).
*/
- if (dsp->dsa_pending_op != PENDING_NONE &&
- dsp->dsa_pending_op != PENDING_FREE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE &&
+ dscp->dsc_pending_op != PENDING_FREE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
- if (dsp->dsa_pending_op == PENDING_FREE) {
- /*
- * There should never be a PENDING_FREE if length is
- * DMU_OBJECT_END (because dump_dnode is the only place where
- * this function is called with a DMU_OBJECT_END, and only after
- * flushing any pending record).
- */
- ASSERT(length != DMU_OBJECT_END);
+ if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
- if (offset + length < offset)
- drrf->drr_length = DMU_OBJECT_END;
+ if (offset + length < offset || length == UINT64_MAX)
+ drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_FREE;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
- drrf->drr_toguid = dsp->dsa_toguid;
+ drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
- dsp->dsa_pending_op = PENDING_FREE;
+ dscp->dsc_pending_op = PENDING_FREE;
+ }
+
+ return (0);
+}
+
+/*
+ * Fill in the drr_redact struct, or perform aggregation if the previous record
+ * is also a redaction record, and the two are adjacent.
+ */
+static int
+dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
+ uint64_t length)
+{
+ struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
+
+ /*
+ * If there is a pending op, but it's not PENDING_REDACT, push it out,
+ * since free block aggregation can only be done for blocks of the
+ * same type (i.e., DRR_REDACT records can only be aggregated with
+ * other DRR_REDACT records).
+ */
+ if (dscp->dsc_pending_op != PENDING_NONE &&
+ dscp->dsc_pending_op != PENDING_REDACT) {
+ if (dump_record(dscp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dscp->dsc_pending_op = PENDING_NONE;
}
+ if (dscp->dsc_pending_op == PENDING_REDACT) {
+ /*
+ * Check to see whether this redacted block can be aggregated
+ * with the pending one.
+ */
+ if (drrr->drr_object == object && drrr->drr_offset +
+ drrr->drr_length == offset) {
+ drrr->drr_length += length;
+ return (0);
+ } else {
+ /* not a continuation. Push out pending record */
+ if (dump_record(dscp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dscp->dsc_pending_op = PENDING_NONE;
+ }
+ }
+ /* create a REDACT record and make it pending */
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_REDACT;
+ drrr->drr_object = object;
+ drrr->drr_offset = offset;
+ drrr->drr_length = length;
+ drrr->drr_toguid = dscp->dsc_toguid;
+ dscp->dsc_pending_op = PENDING_REDACT;
+
return (0);
}
static int
-dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type, uint64_t object,
- uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
+dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
+ uint64_t offset, int lsize, int psize, const blkptr_t *bp,
+ boolean_t io_compressed, void *data)
{
uint64_t payload_size;
- boolean_t raw = (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
- struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
+ boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
+ struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
- ASSERT(object > dsp->dsa_last_data_object ||
- (object == dsp->dsa_last_data_object &&
- offset > dsp->dsa_last_data_offset));
- dsp->dsa_last_data_object = object;
- dsp->dsa_last_data_offset = offset + lsize - 1;
+ ASSERT(object > dscp->dsc_last_data_object ||
+ (object == dscp->dsc_last_data_object &&
+ offset > dscp->dsc_last_data_offset));
+ dscp->dsc_last_data_object = object;
+ dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* the stream, since aggregation can't be done across operations
* of different types.
*/
- if (dsp->dsa_pending_op != PENDING_NONE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_WRITE;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
- drrw->drr_toguid = dsp->dsa_toguid;
+ drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
- if (raw || lsize != psize) {
+ boolean_t compressed =
+ (bp != NULL ? BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
+ io_compressed : lsize != psize);
+ if (raw || compressed) {
+ ASSERT(bp != NULL);
+ ASSERT(raw || dscp->dsc_featureflags &
+ DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
- ASSERT(dsp->dsa_featureflags &
+ ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
- * plaintext, so (like fletcher4-checkummed blocks) userland
+ * plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
- if (dump_record(dsp, data, payload_size) != 0)
+ if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
-dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
+dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
- &(dsp->dsa_drr->drr_u.drr_write_embedded);
+ &(dscp->dsc_drr->drr_u.drr_write_embedded);
- if (dsp->dsa_pending_op != PENDING_NONE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
- drrw->drr_toguid = dsp->dsa_toguid;
+ drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
- if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
+ uint32_t psize = drrw->drr_psize;
+ uint32_t rsize = P2ROUNDUP(psize, 8);
+
+ if (psize != rsize)
+ memset(buf + psize, 0, rsize - psize);
+
+ if (dump_record(dscp, buf, rsize) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
-dump_spill(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object, void *data)
+dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
+ void *data)
{
- struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
+ struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
- if (dsp->dsa_pending_op != PENDING_NONE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_SPILL;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
- drrs->drr_toguid = dsp->dsa_toguid;
+ drrs->drr_toguid = dscp->dsc_toguid;
+
+ /* See comment in dump_dnode() for full details */
+ if (zfs_send_unmodified_spill_blocks &&
+ (bp->blk_birth <= dscp->dsc_fromtxg)) {
+ drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
+ }
/* handle raw send fields */
- if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
+ if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
payload_size = drrs->drr_compressed_size;
}
- if (dump_record(dsp, data, payload_size) != 0)
+ if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
-dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
+dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
- struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
+ struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
- (DMU_META_DNODE(dsp->dsa_os)->dn_maxblkid + 1);
+ (DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
* receiving side.
*/
if (maxobj > 0) {
- if (maxobj < firstobj)
+ if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
- * can only be aggregated with other DRR_FREEOBJECTS records.
+ * can only be aggregated with other DRR_FREEOBJECTS records).
*/
- if (dsp->dsa_pending_op != PENDING_NONE &&
- dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE &&
+ dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
- if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
+
+ if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with pending one
return (0);
} else {
/* can't be aggregated. Push out pending record */
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
- drrfo->drr_toguid = dsp->dsa_toguid;
+ drrfo->drr_toguid = dscp->dsc_toguid;
- dsp->dsa_pending_op = PENDING_FREEOBJECTS;
+ dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
static int
-dump_dnode(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t object,
+dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
- struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
+ struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
- if (object < dsp->dsa_resume_object) {
+ if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
- ASSERT3U(dsp->dsa_resume_object - object, <,
+ ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
- return (dump_freeobjects(dsp, object, 1));
+ return (dump_freeobjects(dscp, object, 1));
- if (dsp->dsa_pending_op != PENDING_NONE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_OBJECT;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
- drro->drr_toguid = dsp->dsa_toguid;
+ drro->drr_toguid = dscp->dsc_toguid;
- if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
+ if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
- if ((dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW)) {
+ if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
* to send it.
*/
if (bonuslen != 0) {
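+ /*
+ * A bonus length that exceeds what the dnode's slot
+ * count allows indicates a damaged dnode; refuse to
+ * send it.
+ */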
+ if (drro->drr_bonuslen > DN_MAX_BONUS_LEN(dnp))
+ return (SET_ERROR(EINVAL));
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
- if (dump_record(dsp, DN_BONUS(dnp), bonuslen) != 0)
+ /*
+ * DRR_OBJECT_SPILL is set for every dnode which references a
+ * spill block. This allows the receiving pool to definitively
+ * determine when a spill block should be kept or freed.
+ */
+ if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
+ drro->drr_flags |= DRR_OBJECT_SPILL;
+
+ if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
- if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
+ if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
- if (dsp->dsa_err != 0)
+
+ /*
+ * Send DRR_SPILL records for unmodified spill blocks. This is useful
+ * because changing certain attributes of the object (e.g. blocksize)
+ * can cause old versions of ZFS to incorrectly remove a spill block.
+ * Including these records in the stream forces an up-to-date version
+ * to always be written, ensuring they're never lost. Current versions
+ * of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
+ * ignore these unmodified spill blocks.
+ */
+ if (zfs_send_unmodified_spill_blocks &&
+ (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
+ (DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
+ struct send_range record;
+ blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
+
+ memset(&record, 0, sizeof (struct send_range));
+ record.type = DATA;
+ record.object = object;
+ record.eos_marker = B_FALSE;
+ record.start_blkid = DMU_SPILL_BLKID;
+ record.end_blkid = record.start_blkid + 1;
+ record.sru.data.bp = *bp;
+ record.sru.data.obj_type = dnp->dn_type;
+ record.sru.data.datablksz = BP_GET_LSIZE(bp);
+
+ if (do_dump(dscp, &record) != 0)
+ return (SET_ERROR(EINTR));
+ }
+
+ if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
+
return (0);
}
static int
-dump_object_range(dmu_sendarg_t *dsp, const blkptr_t *bp, uint64_t firstobj,
- uint64_t numslots)
+dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
+ uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
- &(dsp->dsa_drr->drr_u.drr_object_range);
+ &(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
- ASSERT(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_RAW);
+ ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
- if (dsp->dsa_pending_op != PENDING_NONE) {
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dscp->dsc_pending_op != PENDING_NONE) {
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
- dsp->dsa_pending_op = PENDING_NONE;
+ dscp->dsc_pending_op = PENDING_NONE;
}
- bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
- dsp->dsa_drr->drr_type = DRR_OBJECT_RANGE;
+ memset(dscp->dsc_drr, 0, sizeof (dmu_replay_record_t));
+ dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
- drror->drr_toguid = dsp->dsa_toguid;
+ drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
- if (dump_record(dsp, NULL, 0) != 0)
+ if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static boolean_t
-backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
+send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
- !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LZ4)))
+ !(featureflags & DMU_BACKUP_FEATURE_LZ4)))
+ return (B_FALSE);
+
+ /*
+ * If we have not set the ZSTD feature flag, we can't send ZSTD
+ * compressed embedded blocks, as the receiver may not support them.
+ */
+ if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
+ !(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
- if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
+ if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
}
/*
- * This is the callback function to traverse_dataset that acts as the worker
- * thread for dmu_send_impl.
+ * This function actually handles figuring out what kind of record needs to be
+ * dumped, and calling the appropriate helper function. In most cases,
+ * the data has already been read by send_reader_thread().
*/
-/*ARGSUSED*/
static int
-send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
- const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
+do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
- struct send_thread_arg *sta = arg;
- struct send_block_record *record;
- uint64_t record_size;
int err = 0;
+ switch (range->type) {
+ case OBJECT:
+ err = dump_dnode(dscp, &range->sru.object.bp, range->object,
+ range->sru.object.dnp);
+ return (err);
+ case OBJECT_RANGE: {
+ ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
+ if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
+ return (0);
+ }
+ uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
+ DNODE_SHIFT;
+ uint64_t firstobj = range->start_blkid * epb;
+ err = dump_object_range(dscp, &range->sru.object_range.bp,
+ firstobj, epb);
+ break;
+ }
+ case REDACT: {
+ struct srr *srrp = &range->sru.redact;
+ err = dump_redact(dscp, range->object, range->start_blkid *
+ srrp->datablksz, (range->end_blkid - range->start_blkid) *
+ srrp->datablksz);
+ return (err);
+ }
+ case DATA: {
+ struct srd *srdp = &range->sru.data;
+ blkptr_t *bp = &srdp->bp;
+ spa_t *spa = dmu_objset_spa(dscp->dsc_os);
+
+ ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
+ ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
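+ /*
+ * Spill block data is not provided by the reader thread;
+ * issue a synchronous arc_read() (ARC_FLAG_WAIT) here.
+ */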
+ if (BP_GET_TYPE(bp) == DMU_OT_SA) {
+ arc_flags_t aflags = ARC_FLAG_WAIT;
+ zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
+
+ if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
+ ASSERT(BP_IS_PROTECTED(bp));
+ zioflags |= ZIO_FLAG_RAW;
+ }
- ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
- zb->zb_object >= sta->resume.zb_object);
- ASSERT3P(sta->ds, !=, NULL);
+ zbookmark_phys_t zb;
+ ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
+ zb.zb_objset = dmu_objset_id(dscp->dsc_os);
+ zb.zb_object = range->object;
+ zb.zb_level = 0;
+ zb.zb_blkid = range->start_blkid;
+
+ arc_buf_t *abuf = NULL;
+ if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
+ bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
+ zioflags, &aflags, &zb) != 0)
+ return (SET_ERROR(EIO));
- if (sta->cancel)
- return (SET_ERROR(EINTR));
+ err = dump_spill(dscp, bp, zb.zb_object,
+ (abuf == NULL ? NULL : abuf->b_data));
+ if (abuf != NULL)
+ arc_buf_destroy(abuf, &abuf);
+ return (err);
+ }
+ if (send_do_embed(bp, dscp->dsc_featureflags)) {
+ err = dump_write_embedded(dscp, range->object,
+ range->start_blkid * srdp->datablksz,
+ srdp->datablksz, bp);
+ return (err);
+ }
+ ASSERT(range->object > dscp->dsc_resume_object ||
+ (range->object == dscp->dsc_resume_object &&
+ range->start_blkid * srdp->datablksz >=
+ dscp->dsc_resume_offset));
+ /* it's a level-0 block of a regular object */
- if (bp == NULL) {
- ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
- return (0);
- } else if (zb->zb_level < 0) {
- return (0);
+ mutex_enter(&srdp->lock);
+ while (srdp->io_outstanding)
+ cv_wait(&srdp->cv, &srdp->lock);
+ err = srdp->io_err;
+ mutex_exit(&srdp->lock);
+
+ if (err != 0) {
+ if (zfs_send_corrupt_data &&
+ !dscp->dsc_dso->dso_dryrun) {
+ /*
+ * Send a block filled with 0x"zfs badd bloc"
+ */
+ srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
+ ARC_BUFC_DATA, srdp->datablksz);
+ uint64_t *ptr;
+ for (ptr = srdp->abuf->b_data;
+ (char *)ptr < (char *)srdp->abuf->b_data +
+ srdp->datablksz; ptr++)
+ *ptr = 0x2f5baddb10cULL;
+ } else {
+ return (SET_ERROR(EIO));
+ }
+ }
+
+ ASSERT(dscp->dsc_dso->dso_dryrun ||
+ srdp->abuf != NULL || srdp->abd != NULL);
+
+ uint64_t offset = range->start_blkid * srdp->datablksz;
+
+ char *data = NULL;
+ if (srdp->abd != NULL) {
+ data = abd_to_buf(srdp->abd);
+ ASSERT3P(srdp->abuf, ==, NULL);
+ } else if (srdp->abuf != NULL) {
+ data = srdp->abuf->b_data;
+ }
+
+ /*
+ * If we have large blocks stored on disk but the send flags
+ * don't allow us to send large blocks, we split the data from
+ * the arc buf into chunks.
+ */
+ if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
+ !(dscp->dsc_featureflags &
+ DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
+ while (srdp->datablksz > 0 && err == 0) {
+ int n = MIN(srdp->datablksz,
+ SPA_OLD_MAXBLOCKSIZE);
+ err = dmu_dump_write(dscp, srdp->obj_type,
+ range->object, offset, n, n, NULL, B_FALSE,
+ data);
+ offset += n;
+ /*
+ * When doing a dry run, data==NULL is used as a
+ * sentinel value by
+ * dmu_dump_write()->dump_record().
+ */
+ if (data != NULL)
+ data += n;
+ srdp->datablksz -= n;
+ }
+ } else {
+ err = dmu_dump_write(dscp, srdp->obj_type,
+ range->object, offset,
+ srdp->datablksz, srdp->datasz, bp,
+ srdp->io_compressed, data);
+ }
+ return (err);
}
+ case HOLE: {
+ struct srh *srhp = &range->sru.hole;
+ if (range->object == DMU_META_DNODE_OBJECT) {
+ uint32_t span = srhp->datablksz >> DNODE_SHIFT;
+ uint64_t first_obj = range->start_blkid * span;
+ uint64_t numobj = range->end_blkid * span - first_obj;
+ return (dump_freeobjects(dscp, first_obj, numobj));
+ }
+ uint64_t offset = 0;
- record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
- record->eos_marker = B_FALSE;
- record->bp = *bp;
- record->zb = *zb;
- record->indblkshift = dnp->dn_indblkshift;
- record->datablkszsec = dnp->dn_datablkszsec;
- record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
- bqueue_enqueue(&sta->q, record, record_size);
+ /*
+ * If this multiply overflows, we don't need to send this block.
+ * Even if it has a birth time, it must be a hole, so we
+ * don't need to send records for it.
+ */
+ if (!overflow_multiply(range->start_blkid, srhp->datablksz,
+ &offset)) {
+ return (0);
+ }
+ uint64_t len = 0;
+ if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
+ len = UINT64_MAX;
+ len = len - offset;
+ return (dump_free(dscp, range->object, offset, len));
+ }
+ default:
+ panic("Invalid range type in do_dump: %d", range->type);
+ }
return (err);
}
-/*
- * This function kicks off the traverse_dataset. It also handles setting the
- * error code of the thread in case something goes wrong, and pushes the End of
- * Stream record when the traverse_dataset call has finished. If there is no
- * dataset to traverse, the thread immediately pushes End of Stream marker.
- */
-static void
-send_traverse_thread(void *arg)
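+/*
+ * Allocate a new send_range of the given type. For DATA ranges, also
+ * initialize the lock, condvar, and I/O tracking fields used to follow
+ * a read of the block.
+ */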
+static struct send_range *
+range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
+ uint64_t end_blkid, boolean_t eos)
{
- struct send_thread_arg *st_arg = arg;
- int err;
- struct send_block_record *data;
- fstrans_cookie_t cookie = spl_fstrans_mark();
-
- if (st_arg->ds != NULL) {
- err = traverse_dataset_resume(st_arg->ds,
- st_arg->fromtxg, &st_arg->resume,
- st_arg->flags, send_cb, st_arg);
-
- if (err != EINTR)
- st_arg->error_code = err;
- }
- data = kmem_zalloc(sizeof (*data), KM_SLEEP);
- data->eos_marker = B_TRUE;
- bqueue_enqueue(&st_arg->q, data, 1);
- spl_fstrans_unmark(cookie);
- thread_exit();
+ struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
+ range->type = type;
+ range->object = object;
+ range->start_blkid = start_blkid;
+ range->end_blkid = end_blkid;
+ range->eos_marker = eos;
+ if (type == DATA) {
+ range->sru.data.abd = NULL;
+ range->sru.data.abuf = NULL;
+ mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
+ range->sru.data.io_outstanding = 0;
+ range->sru.data.io_err = 0;
+ range->sru.data.io_compressed = B_FALSE;
+ }
+ return (range);
}
/*
- * This function actually handles figuring out what kind of record needs to be
- * dumped, reading the data (which has hopefully been prefetched), and calling
- * the appropriate helper function.
+ * This is the callback function to traverse_dataset; it runs in the send
+ * traversal thread on behalf of dmu_send_impl.
*/
static int
-do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
+send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
+ const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
- dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
- const blkptr_t *bp = &data->bp;
- const zbookmark_phys_t *zb = &data->zb;
- uint8_t indblkshift = data->indblkshift;
- uint16_t dblkszsec = data->datablkszsec;
- spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
- dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
- int err = 0;
-
- ASSERT3U(zb->zb_level, >=, 0);
+ (void) zilog;
+ struct send_thread_arg *sta = arg;
+ struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
- zb->zb_object >= dsa->dsa_resume_object);
+ zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
- if (dsa->dsa_os->os_encrypted &&
+ if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
- spa_log_error(spa, zb);
- zfs_panic_recover("unencrypted block in encrypted "
- "object set %llu", ds->ds_object);
+ spa_log_error(spa, zb, &bp->blk_birth);
return (SET_ERROR(EIO));
}
+ if (sta->cancel)
+ return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
- DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
+ DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
- } else if (BP_IS_HOLE(bp) &&
- zb->zb_object == DMU_META_DNODE_OBJECT) {
- uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
- uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
- err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
- } else if (BP_IS_HOLE(bp)) {
- uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
- uint64_t offset = zb->zb_blkid * span;
- /* Don't dump free records for offsets > DMU_OBJECT_END */
- if (zb->zb_blkid == 0 || span <= DMU_OBJECT_END / zb->zb_blkid)
- err = dump_free(dsa, zb->zb_object, offset, span);
- } else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
+ atomic_inc_64(sta->num_blocks_visited);
+
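+ /*
+ * Dnodes are enqueued as OBJECT records with a private copy of
+ * the dnode_phys_t, since the buffer backing dnp need not remain
+ * valid after this callback returns.
+ */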
+ if (zb->zb_level == ZB_DNODE_LEVEL) {
+ if (zb->zb_object == DMU_META_DNODE_OBJECT)
+ return (0);
+ record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
+ record->sru.object.bp = *bp;
+ size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
+ record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
+ memcpy(record->sru.object.dnp, dnp, size);
+ bqueue_enqueue(&sta->q, record, sizeof (*record));
+ return (0);
+ }
+ if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
+ !BP_IS_HOLE(bp)) {
+ record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
+ zb->zb_blkid + 1, B_FALSE);
+ record->sru.object_range.bp = *bp;
+ bqueue_enqueue(&sta->q, record, sizeof (*record));
+ return (0);
+ }
+ if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
+ return (0);
+ if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
- } else if (type == DMU_OT_DNODE) {
- int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
- arc_flags_t aflags = ARC_FLAG_WAIT;
- arc_buf_t *abuf;
- enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
-
- if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
- ASSERT(BP_IS_ENCRYPTED(bp));
- ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
- zioflags |= ZIO_FLAG_RAW;
- }
- ASSERT0(zb->zb_level);
+ uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
+ uint64_t start;
- if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
- ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
- return (SET_ERROR(EIO));
+ /*
+ * If this multiply overflows, we don't need to send this block.
+ * Even if it has a birth time, it must be a hole, so we don't
+ * need to send records for it.
+ */
+ if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
+ DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
+ span * zb->zb_blkid > dnp->dn_maxblkid)) {
+ ASSERT(BP_IS_HOLE(bp));
+ return (0);
+ }
- dnode_phys_t *blk = abuf->b_data;
- uint64_t dnobj = zb->zb_blkid * epb;
+ if (zb->zb_blkid == DMU_SPILL_BLKID)
+ ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
- /*
- * Raw sends require sending encryption parameters for the
- * block of dnodes. Regular sends do not need to send this
- * info.
- */
- if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
- ASSERT(arc_is_encrypted(abuf));
- err = dump_object_range(dsa, bp, dnobj, epb);
- }
+ enum type record_type = DATA;
+ if (BP_IS_HOLE(bp))
+ record_type = HOLE;
+ else if (BP_IS_REDACTED(bp))
+ record_type = REDACT;
+ else
+ record_type = DATA;
- if (err == 0) {
- for (int i = 0; i < epb;
- i += blk[i].dn_extra_slots + 1) {
- err = dump_dnode(dsa, bp, dnobj + i, blk + i);
- if (err != 0)
- break;
- }
- }
- arc_buf_destroy(abuf, &abuf);
- } else if (type == DMU_OT_SA) {
- arc_flags_t aflags = ARC_FLAG_WAIT;
- arc_buf_t *abuf;
- enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
+ record = range_alloc(record_type, zb->zb_object, start,
+ (start + span < start ? 0 : start + span), B_FALSE);
- if (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) {
- ASSERT(BP_IS_PROTECTED(bp));
- zioflags |= ZIO_FLAG_RAW;
- }
+ uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
+ BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
- if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
- ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0)
- return (SET_ERROR(EIO));
-
- err = dump_spill(dsa, bp, zb->zb_object, abuf->b_data);
- arc_buf_destroy(abuf, &abuf);
- } else if (backup_do_embed(dsa, bp)) {
- /* it's an embedded level-0 block of a regular object */
- int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
- ASSERT0(zb->zb_level);
- err = dump_write_embedded(dsa, zb->zb_object,
- zb->zb_blkid * blksz, blksz, bp);
+ if (BP_IS_HOLE(bp)) {
+ record->sru.hole.datablksz = datablksz;
+ } else if (BP_IS_REDACTED(bp)) {
+ record->sru.redact.datablksz = datablksz;
} else {
- /* it's a level-0 block of a regular object */
- arc_flags_t aflags = ARC_FLAG_WAIT;
- arc_buf_t *abuf;
- int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
- uint64_t offset;
-
- /*
- * If we have large blocks stored on disk but the send flags
- * don't allow us to send large blocks, we split the data from
- * the arc buf into chunks.
- */
- boolean_t split_large_blocks = blksz > SPA_OLD_MAXBLOCKSIZE &&
- !(dsa->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
+ record->sru.data.datablksz = datablksz;
+ record->sru.data.obj_type = dnp->dn_type;
+ record->sru.data.bp = *bp;
+ }
- /*
- * Raw sends require that we always get raw data as it exists
- * on disk, so we assert that we are not splitting blocks here.
- */
- boolean_t request_raw =
- (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
+ bqueue_enqueue(&sta->q, record, sizeof (*record));
+ return (0);
+}
- /*
- * We should only request compressed data from the ARC if all
- * the following are true:
- * - stream compression was requested
- * - we aren't splitting large blocks into smaller chunks
- * - the data won't need to be byteswapped before sending
- * - this isn't an embedded block
- * - this isn't metadata (if receiving on a different endian
- * system it can be byteswapped more easily)
- */
- boolean_t request_compressed =
- (dsa->dsa_featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
- !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
- !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
-
- IMPLY(request_raw, !split_large_blocks);
- IMPLY(request_raw, BP_IS_PROTECTED(bp));
- ASSERT0(zb->zb_level);
- ASSERT(zb->zb_object > dsa->dsa_resume_object ||
- (zb->zb_object == dsa->dsa_resume_object &&
- zb->zb_blkid * blksz >= dsa->dsa_resume_offset));
-
- ASSERT3U(blksz, ==, BP_GET_LSIZE(bp));
-
- enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
- if (request_raw)
- zioflags |= ZIO_FLAG_RAW;
- else if (request_compressed)
- zioflags |= ZIO_FLAG_RAW_COMPRESS;
-
- if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
- ZIO_PRIORITY_ASYNC_READ, zioflags, &aflags, zb) != 0) {
- if (zfs_send_corrupt_data) {
- /* Send a block filled with 0x"zfs badd bloc" */
- abuf = arc_alloc_buf(spa, &abuf, ARC_BUFC_DATA,
- blksz);
- uint64_t *ptr;
- for (ptr = abuf->b_data;
- (char *)ptr < (char *)abuf->b_data + blksz;
- ptr++)
- *ptr = 0x2f5baddb10cULL;
- } else {
- return (SET_ERROR(EIO));
- }
- }
+struct redact_list_cb_arg {
+ uint64_t *num_blocks_visited;
+ bqueue_t *q;
+ boolean_t *cancel;
+ boolean_t mark_redact;
+};
- offset = zb->zb_blkid * blksz;
-
- if (split_large_blocks) {
- ASSERT0(arc_is_encrypted(abuf));
- ASSERT3U(arc_get_compression(abuf), ==,
- ZIO_COMPRESS_OFF);
- char *buf = abuf->b_data;
- while (blksz > 0 && err == 0) {
- int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
- err = dump_write(dsa, type, zb->zb_object,
- offset, n, n, NULL, buf);
- offset += n;
- buf += n;
- blksz -= n;
- }
- } else {
- err = dump_write(dsa, type, zb->zb_object, offset,
- blksz, arc_buf_size(abuf), bp, abuf->b_data);
- }
- arc_buf_destroy(abuf, &abuf);
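+/*
+ * Invoked once per entry in a redaction list; converts the
+ * redact_block_phys_t into a send_range and enqueues it. Returns -1 to
+ * abort the traversal when the send has been canceled.
+ */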
+static int
+redact_list_cb(redact_block_phys_t *rb, void *arg)
+{
+ struct redact_list_cb_arg *rlcap = arg;
+
+ atomic_inc_64(rlcap->num_blocks_visited);
+ if (*rlcap->cancel)
+ return (-1);
+
+ struct send_range *data = range_alloc(REDACT, rb->rbp_object,
+ rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
+ ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
+ if (rlcap->mark_redact) {
+ data->type = REDACT;
+ data->sru.redact.datablksz = redact_block_get_size(rb);
+ } else {
+ data->type = PREVIOUSLY_REDACTED;
}
+ bqueue_enqueue(rlcap->q, data, sizeof (*data));
- ASSERT(err == 0 || err == EINTR);
- return (err);
+ return (0);
}
/*
- * Pop the new data off the queue, and free the old data.
+ * This function kicks off the dataset traversal. It also handles setting the
+ * thread's error code in case something goes wrong, and pushes the End of
+ * Stream record when the traverse_dataset_resume() call has finished.
*/
-static struct send_block_record *
-get_next_record(bqueue_t *bq, struct send_block_record *data)
+static __attribute__((noreturn)) void
+send_traverse_thread(void *arg)
{
- struct send_block_record *tmp = bqueue_dequeue(bq);
- kmem_free(data, sizeof (*data));
- return (tmp);
+ struct send_thread_arg *st_arg = arg;
+ int err = 0;
+ struct send_range *data;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
+
+ err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
+ st_arg->fromtxg, &st_arg->resume,
+ st_arg->flags, send_cb, st_arg);
+
+ if (err != EINTR)
+ st_arg->error_code = err;
+ data = range_alloc(DATA, 0, 0, 0, B_TRUE);
+ bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
+ spl_fstrans_unmark(cookie);
+ thread_exit();
}
/*
- * Actually do the bulk of the work in a zfs send.
- *
- * Note: Releases dp using the specified tag.
+ * Utility function that causes End of Stream records to compare after all
+ * others, so that other threads' comparison logic can stay simple.
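+ * Returns -1 if from sorts strictly before to, 1 if strictly after, and 0
+ * if the two ranges overlap.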
*/
-static int
-dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
- zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone,
- boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
- boolean_t rawok, int outfd, uint64_t resumeobj, uint64_t resumeoff,
- vnode_t *vp, offset_t *off)
+static int __attribute__((unused))
+send_range_after(const struct send_range *from, const struct send_range *to)
{
- objset_t *os;
- dmu_replay_record_t *drr;
- dmu_sendarg_t *dsp;
- int err;
- uint64_t fromtxg = 0;
- uint64_t featureflags = 0;
- struct send_thread_arg to_arg;
- void *payload = NULL;
- size_t payload_len = 0;
- struct send_block_record *to_data;
+ if (from->eos_marker == B_TRUE)
+ return (1);
+ if (to->eos_marker == B_TRUE)
+ return (-1);
+
+ uint64_t from_obj = from->object;
+ uint64_t from_end_obj = from->object + 1;
+ uint64_t to_obj = to->object;
+ uint64_t to_end_obj = to->object + 1;
+ if (from_obj == 0) {
+ ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
+ from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
+ from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
+ }
+ if (to_obj == 0) {
+ ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
+ to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
+ to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
+ }
+
+ if (from_end_obj <= to_obj)
+ return (-1);
+ if (from_obj >= to_end_obj)
+ return (1);
+ int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
+ OBJECT_RANGE);
+ if (unlikely(cmp))
+ return (cmp);
+ cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
+ if (unlikely(cmp))
+ return (cmp);
+ if (from->end_blkid <= to->start_blkid)
+ return (-1);
+ if (from->start_blkid >= to->end_blkid)
+ return (1);
+ return (0);
+}
- err = dmu_objset_from_ds(to_ds, &os);
- if (err != 0) {
- dsl_pool_rele(dp, tag);
- return (err);
- }
-
- /*
- * If this is a non-raw send of an encrypted ds, we can ensure that
- * the objset_phys_t is authenticated. This is safe because this is
- * either a snapshot or we have owned the dataset, ensuring that
- * it can't be modified.
- */
- if (!rawok && os->os_encrypted &&
- arc_is_unauthenticated(os->os_phys_buf)) {
- zbookmark_phys_t zb;
-
- SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
- ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
- err = arc_untransform(os->os_phys_buf, os->os_spa,
- &zb, B_FALSE);
- if (err != 0) {
- dsl_pool_rele(dp, tag);
- return (err);
- }
-
- ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
- }
-
- drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
- drr->drr_type = DRR_BEGIN;
- drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
- DMU_SET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo,
- DMU_SUBSTREAM);
-
- bzero(&to_arg, sizeof (to_arg));
-
-#ifdef _KERNEL
- if (dmu_objset_type(os) == DMU_OST_ZFS) {
- uint64_t version;
- if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
- kmem_free(drr, sizeof (dmu_replay_record_t));
- dsl_pool_rele(dp, tag);
- return (SET_ERROR(EINVAL));
- }
- if (version >= ZPL_VERSION_SA) {
- featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
- }
- }
-#endif
-
- /* raw sends imply large_block_ok */
- if ((large_block_ok || rawok) &&
- to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
- featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
- if (to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_DNODE])
- featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
-
- /* encrypted datasets will not have embedded blocks */
- if ((embedok || rawok) && !os->os_encrypted &&
- spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
- featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
- }
-
- /* raw send implies compressok */
- if (compressok || rawok)
- featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
- if (rawok && os->os_encrypted)
- featureflags |= DMU_BACKUP_FEATURE_RAW;
-
- if ((featureflags &
- (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
- DMU_BACKUP_FEATURE_RAW)) != 0 &&
- spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
- featureflags |= DMU_BACKUP_FEATURE_LZ4;
- }
-
- if (resumeobj != 0 || resumeoff != 0) {
- featureflags |= DMU_BACKUP_FEATURE_RESUMING;
- }
-
- DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
- featureflags);
-
- drr->drr_u.drr_begin.drr_creation_time =
- dsl_dataset_phys(to_ds)->ds_creation_time;
- drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
- if (is_clone)
- drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
- drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
- if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
- drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
- if (zfs_send_set_freerecords_bit)
- drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_FREERECORDS;
-
- if (ancestor_zb != NULL) {
- drr->drr_u.drr_begin.drr_fromguid =
- ancestor_zb->zbm_guid;
- fromtxg = ancestor_zb->zbm_creation_txg;
- }
- dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
- if (!to_ds->ds_is_snapshot) {
- (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
- sizeof (drr->drr_u.drr_begin.drr_toname));
- }
-
- dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
-
- dsp->dsa_drr = drr;
- dsp->dsa_vp = vp;
- dsp->dsa_outfd = outfd;
- dsp->dsa_proc = curproc;
- dsp->dsa_os = os;
- dsp->dsa_off = off;
- dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
- dsp->dsa_pending_op = PENDING_NONE;
- dsp->dsa_featureflags = featureflags;
- dsp->dsa_resume_object = resumeobj;
- dsp->dsa_resume_offset = resumeoff;
-
- mutex_enter(&to_ds->ds_sendstream_lock);
- list_insert_head(&to_ds->ds_sendstreams, dsp);
- mutex_exit(&to_ds->ds_sendstream_lock);
-
- dsl_dataset_long_hold(to_ds, FTAG);
- dsl_pool_rele(dp, tag);
-
- /* handle features that require a DRR_BEGIN payload */
- if (featureflags &
- (DMU_BACKUP_FEATURE_RESUMING | DMU_BACKUP_FEATURE_RAW)) {
- nvlist_t *keynvl = NULL;
- nvlist_t *nvl = fnvlist_alloc();
-
- if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
- dmu_object_info_t to_doi;
- err = dmu_object_info(os, resumeobj, &to_doi);
- if (err != 0) {
- fnvlist_free(nvl);
- goto out;
- }
-
- SET_BOOKMARK(&to_arg.resume, to_ds->ds_object,
- resumeobj, 0,
- resumeoff / to_doi.doi_data_block_size);
-
- fnvlist_add_uint64(nvl, "resume_object", resumeobj);
- fnvlist_add_uint64(nvl, "resume_offset", resumeoff);
- }
-
- if (featureflags & DMU_BACKUP_FEATURE_RAW) {
- ASSERT(os->os_encrypted);
-
- err = dsl_crypto_populate_key_nvlist(to_ds, &keynvl);
- if (err != 0) {
- fnvlist_free(nvl);
- goto out;
- }
-
- fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
- }
-
- payload = fnvlist_pack(nvl, &payload_len);
- drr->drr_payloadlen = payload_len;
- fnvlist_free(keynvl);
- fnvlist_free(nvl);
- }
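-
-	/*
-	 * The payload packed above is a single nvlist; e.g. a resumable
-	 * raw send would carry { "resume_object"=<obj>,
-	 * "resume_offset"=<off>, "crypt_keydata"=<key nvlist> }.
-	 */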
-
- err = dump_record(dsp, payload, payload_len);
- fnvlist_pack_free(payload, payload_len);
- if (err != 0) {
- err = dsp->dsa_err;
- goto out;
- }
-
- err = bqueue_init(&to_arg.q,
- MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
- offsetof(struct send_block_record, ln));
- to_arg.error_code = 0;
- to_arg.cancel = B_FALSE;
- to_arg.ds = to_ds;
- to_arg.fromtxg = fromtxg;
- to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
- if (rawok)
- to_arg.flags |= TRAVERSE_NO_DECRYPT;
- (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
- TS_RUN, minclsyspri);
-
- to_data = bqueue_dequeue(&to_arg.q);
-
- while (!to_data->eos_marker && err == 0) {
- err = do_dump(dsp, to_data);
- to_data = get_next_record(&to_arg.q, to_data);
- if (issig(JUSTLOOKING) && issig(FORREAL))
- err = EINTR;
- }
-
- if (err != 0) {
- to_arg.cancel = B_TRUE;
- while (!to_data->eos_marker) {
- to_data = get_next_record(&to_arg.q, to_data);
- }
- }
- kmem_free(to_data, sizeof (*to_data));
-
- bqueue_destroy(&to_arg.q);
-
- if (err == 0 && to_arg.error_code != 0)
- err = to_arg.error_code;
-
- if (err != 0)
- goto out;
-
- if (dsp->dsa_pending_op != PENDING_NONE)
- if (dump_record(dsp, NULL, 0) != 0)
- err = SET_ERROR(EINTR);
-
- if (err != 0) {
- if (err == EINTR && dsp->dsa_err != 0)
- err = dsp->dsa_err;
- goto out;
- }
-
- bzero(drr, sizeof (dmu_replay_record_t));
- drr->drr_type = DRR_END;
- drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
- drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
-
- if (dump_record(dsp, NULL, 0) != 0)
- err = dsp->dsa_err;
-out:
- mutex_enter(&to_ds->ds_sendstream_lock);
- list_remove(&to_ds->ds_sendstreams, dsp);
- mutex_exit(&to_ds->ds_sendstream_lock);
-
- VERIFY(err != 0 || (dsp->dsa_sent_begin && dsp->dsa_sent_end));
-
- kmem_free(drr, sizeof (dmu_replay_record_t));
- kmem_free(dsp, sizeof (dmu_sendarg_t));
-
- dsl_dataset_long_rele(to_ds, FTAG);
-
- return (err);
-}
-
-int
-dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
- boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
- boolean_t rawok, int outfd, vnode_t *vp, offset_t *off)
-{
- dsl_pool_t *dp;
- dsl_dataset_t *ds;
- dsl_dataset_t *fromds = NULL;
- ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
- int err;
-
- err = dsl_pool_hold(pool, FTAG, &dp);
- if (err != 0)
- return (err);
-
- err = dsl_dataset_hold_obj_flags(dp, tosnap, dsflags, FTAG, &ds);
- if (err != 0) {
- dsl_pool_rele(dp, FTAG);
- return (err);
- }
-
- if (fromsnap != 0) {
- zfs_bookmark_phys_t zb;
- boolean_t is_clone;
-
- err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
- if (err != 0) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- dsl_pool_rele(dp, FTAG);
- return (err);
- }
- if (!dsl_dataset_is_before(ds, fromds, 0))
- err = SET_ERROR(EXDEV);
- zb.zbm_creation_time =
- dsl_dataset_phys(fromds)->ds_creation_time;
- zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
- zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
- is_clone = (fromds->ds_dir != ds->ds_dir);
- dsl_dataset_rele(fromds, FTAG);
- err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
- embedok, large_block_ok, compressok, rawok, outfd,
- 0, 0, vp, off);
- } else {
- err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
- embedok, large_block_ok, compressok, rawok, outfd,
- 0, 0, vp, off);
- }
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (err);
-}
-
-int
-dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
- boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
- int outfd, uint64_t resumeobj, uint64_t resumeoff, vnode_t *vp,
- offset_t *off)
-{
- dsl_pool_t *dp;
- dsl_dataset_t *ds;
- int err;
- ds_hold_flags_t dsflags = (rawok) ? 0 : DS_HOLD_FLAG_DECRYPT;
- boolean_t owned = B_FALSE;
-
- if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
- return (SET_ERROR(EINVAL));
-
- err = dsl_pool_hold(tosnap, FTAG, &dp);
- if (err != 0)
- return (err);
-
- if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
- /*
- * We are sending a filesystem or volume. Ensure
- * that it doesn't change by owning the dataset.
- */
- err = dsl_dataset_own(dp, tosnap, dsflags, FTAG, &ds);
- owned = B_TRUE;
- } else {
- err = dsl_dataset_hold_flags(dp, tosnap, dsflags, FTAG, &ds);
- }
- if (err != 0) {
- dsl_pool_rele(dp, FTAG);
- return (err);
- }
-
- if (fromsnap != NULL) {
- zfs_bookmark_phys_t zb;
- boolean_t is_clone = B_FALSE;
- int fsnamelen = strchr(tosnap, '@') - tosnap;
-
- /*
- * If the fromsnap is in a different filesystem, then
- * mark the send stream as a clone.
- */
- if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
- (fromsnap[fsnamelen] != '@' &&
- fromsnap[fsnamelen] != '#')) {
- is_clone = B_TRUE;
- }
-
- if (strchr(fromsnap, '@')) {
- dsl_dataset_t *fromds;
- err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
- if (err == 0) {
- if (!dsl_dataset_is_before(ds, fromds, 0))
- err = SET_ERROR(EXDEV);
- zb.zbm_creation_time =
- dsl_dataset_phys(fromds)->ds_creation_time;
- zb.zbm_creation_txg =
- dsl_dataset_phys(fromds)->ds_creation_txg;
- zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
- is_clone = (ds->ds_dir != fromds->ds_dir);
- dsl_dataset_rele(fromds, FTAG);
- }
- } else {
- err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
- }
- if (err != 0) {
- if (owned)
- dsl_dataset_disown(ds, dsflags, FTAG);
- else
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
-
- dsl_pool_rele(dp, FTAG);
- return (err);
- }
- err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
- embedok, large_block_ok, compressok, rawok,
- outfd, resumeobj, resumeoff, vp, off);
- } else {
- err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
- embedok, large_block_ok, compressok, rawok,
- outfd, resumeobj, resumeoff, vp, off);
- }
- if (owned)
- dsl_dataset_disown(ds, dsflags, FTAG);
- else
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
-
- return (err);
-}
-
-static int
-dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
- uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
-{
- int err = 0;
- uint64_t size;
- /*
- * Assume that space (both on-disk and in-stream) is dominated by
- * data. We will adjust for indirect blocks and the copies property,
- * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
- */
-
- uint64_t recordsize;
- uint64_t record_count;
- objset_t *os;
- VERIFY0(dmu_objset_from_ds(ds, &os));
-
- /* Assume all (uncompressed) blocks are recordsize. */
- if (zfs_override_estimate_recordsize != 0) {
- recordsize = zfs_override_estimate_recordsize;
- } else if (os->os_phys->os_type == DMU_OST_ZVOL) {
- err = dsl_prop_get_int_ds(ds,
- zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
- } else {
- err = dsl_prop_get_int_ds(ds,
- zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
- }
- if (err != 0)
- return (err);
- record_count = uncompressed / recordsize;
-
- /*
- * If we're estimating a send size for a compressed stream, use the
- * compressed data size to estimate the stream size. Otherwise, use the
- * uncompressed data size.
- */
- size = stream_compressed ? compressed : uncompressed;
-
- /*
- * Subtract out approximate space used by indirect blocks.
- * Assume most space is used by data blocks (non-indirect, non-dnode).
- * Assume no ditto blocks or internal fragmentation.
- *
- * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
- * block.
- */
- size -= record_count * sizeof (blkptr_t);
-
- /* Add in the space for the record associated with each block. */
- size += record_count * sizeof (dmu_replay_record_t);
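-
-	/*
-	 * For example, with 128K records each block's 128-byte blkptr_t
-	 * is subtracted and a ~312-byte dmu_replay_record_t header is
-	 * added back, a net adjustment well under 1% of the data size.
-	 */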
-
- *sizep = size;
-
- return (0);
-}
-
-int
-dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds,
- boolean_t stream_compressed, uint64_t *sizep)
-{
- int err;
- uint64_t uncomp, comp;
-
- ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
-
- /* tosnap must be a snapshot */
- if (!ds->ds_is_snapshot)
- return (SET_ERROR(EINVAL));
-
- /* fromsnap, if provided, must be a snapshot */
- if (fromds != NULL && !fromds->ds_is_snapshot)
- return (SET_ERROR(EINVAL));
-
- /*
- * fromsnap must be an earlier snapshot from the same fs as tosnap,
- * or the origin's fs.
- */
- if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
- return (SET_ERROR(EXDEV));
-
- /* Get compressed and uncompressed size estimates of changed data. */
- if (fromds == NULL) {
- uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
- comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
- } else {
- uint64_t used;
- err = dsl_dataset_space_written(fromds, ds,
- &used, &comp, &uncomp);
- if (err != 0)
- return (err);
- }
-
- err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
- stream_compressed, sizep);
- /*
- * Add the size of the BEGIN and END records to the estimate.
- */
- *sizep += 2 * sizeof (dmu_replay_record_t);
- return (err);
-}
-
-struct calculate_send_arg {
- uint64_t uncompressed;
- uint64_t compressed;
-};
-
-/*
- * Simple callback used to traverse the blocks of a snapshot and sum their
- * uncompressed and compressed sizes.
- */
-/* ARGSUSED */
-static int
-dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
- const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
-{
- struct calculate_send_arg *space = arg;
- if (bp != NULL && !BP_IS_HOLE(bp)) {
- space->uncompressed += BP_GET_UCSIZE(bp);
- space->compressed += BP_GET_PSIZE(bp);
- }
- return (0);
-}
-
-/*
- * Given a destination snapshot and a TXG, calculate the approximate size of a
- * send stream sent from that TXG. from_txg may be zero, indicating that the
- * whole snapshot will be sent.
- */
-int
-dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
- boolean_t stream_compressed, uint64_t *sizep)
-{
- int err;
- struct calculate_send_arg size = { 0 };
-
- ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
-
- /* tosnap must be a snapshot */
- if (!dsl_dataset_is_snapshot(ds))
- return (SET_ERROR(EINVAL));
-
- /* verify that from_txg is before the provided snapshot was taken */
- if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
- return (SET_ERROR(EXDEV));
- }
- /*
-	 * traverse the blocks of the snapshot with birth times after
-	 * from_txg, summing their compressed and uncompressed sizes
- */
- err = traverse_dataset(ds, from_txg,
- TRAVERSE_POST | TRAVERSE_NO_DECRYPT,
- dmu_calculate_send_traversal, &size);
-
- if (err)
- return (err);
-
- err = dmu_adjust_send_estimate_for_indirects(ds, size.uncompressed,
- size.compressed, stream_compressed, sizep);
- return (err);
-}
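-
-/*
- * For example, estimating a full compressed send of a snapshot (from_txg ==
- * 0) might look like this (a sketch; per the ASSERT above, the caller must
- * hold the pool config lock):
- *
- *	uint64_t size;
- *	int err = dmu_send_estimate_from_txg(ds, 0, B_TRUE, &size);
- */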
-
-typedef struct dmu_recv_begin_arg {
- const char *drba_origin;
- dmu_recv_cookie_t *drba_cookie;
- cred_t *drba_cred;
- dsl_crypto_params_t *drba_dcp;
- uint64_t drba_snapobj;
-} dmu_recv_begin_arg_t;
-
-static int
-recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
- uint64_t fromguid, uint64_t featureflags)
-{
- uint64_t val;
- int error;
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
- boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
- boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
- boolean_t embed = (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) != 0;
-
- /* temporary clone name must not exist */
- error = zap_lookup(dp->dp_meta_objset,
- dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
- 8, 1, &val);
- if (error != ENOENT)
- return (error == 0 ? EBUSY : error);
-
- /* new snapshot name must not exist */
- error = zap_lookup(dp->dp_meta_objset,
- dsl_dataset_phys(ds)->ds_snapnames_zapobj,
- drba->drba_cookie->drc_tosnap, 8, 1, &val);
- if (error != ENOENT)
- return (error == 0 ? EEXIST : error);
-
- /*
-	 * Check snapshot limit before receiving. We'll recheck at the end,
-	 * but might as well abort before receiving if we're already over
- * the limit.
- *
- * Note that we do not check the file system limit with
- * dsl_dir_fscount_check because the temporary %clones don't count
- * against that limit.
- */
- error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
- NULL, drba->drba_cred);
- if (error != 0)
- return (error);
-
- if (fromguid != 0) {
- dsl_dataset_t *snap;
- uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
-
- /* Can't perform a raw receive on top of a non-raw receive */
- if (!encrypted && raw)
- return (SET_ERROR(EINVAL));
-
- /* Encryption is incompatible with embedded data */
- if (encrypted && embed)
- return (SET_ERROR(EINVAL));
-
- /* Find snapshot in this dir that matches fromguid. */
- while (obj != 0) {
- error = dsl_dataset_hold_obj(dp, obj, FTAG,
- &snap);
- if (error != 0)
- return (SET_ERROR(ENODEV));
- if (snap->ds_dir != ds->ds_dir) {
- dsl_dataset_rele(snap, FTAG);
- return (SET_ERROR(ENODEV));
- }
- if (dsl_dataset_phys(snap)->ds_guid == fromguid)
- break;
- obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
- dsl_dataset_rele(snap, FTAG);
- }
- if (obj == 0)
- return (SET_ERROR(ENODEV));
-
- if (drba->drba_cookie->drc_force) {
- drba->drba_snapobj = obj;
- } else {
- /*
- * If we are not forcing, there must be no
- * changes since fromsnap.
- */
- if (dsl_dataset_modified_since_snap(ds, snap)) {
- dsl_dataset_rele(snap, FTAG);
- return (SET_ERROR(ETXTBSY));
- }
- drba->drba_snapobj = ds->ds_prev->ds_object;
- }
-
- dsl_dataset_rele(snap, FTAG);
- } else {
- /* if full, then must be forced */
- if (!drba->drba_cookie->drc_force)
- return (SET_ERROR(EEXIST));
-
- /*
- * We don't support using zfs recv -F to blow away
- * encrypted filesystems. This would require the
- * dsl dir to point to the old encryption key and
- * the new one at the same time during the receive.
- */
- if ((!encrypted && raw) || encrypted)
- return (SET_ERROR(EINVAL));
-
- /*
- * Perform the same encryption checks we would if
- * we were creating a new dataset from scratch.
- */
- if (!raw) {
- boolean_t will_encrypt;
-
- error = dmu_objset_create_crypt_check(
- ds->ds_dir->dd_parent, drba->drba_dcp,
- &will_encrypt);
- if (error != 0)
- return (error);
-
- if (will_encrypt && embed)
- return (SET_ERROR(EINVAL));
- }
-
- drba->drba_snapobj = 0;
- }
-
- return (0);
-}
-
-static int
-dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
-{
- dmu_recv_begin_arg_t *drba = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
- uint64_t fromguid = drrb->drr_fromguid;
- int flags = drrb->drr_flags;
- ds_hold_flags_t dsflags = 0;
- int error;
- uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
- dsl_dataset_t *ds;
- const char *tofs = drba->drba_cookie->drc_tofs;
-
- /* already checked */
- ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
- ASSERT(!(featureflags & DMU_BACKUP_FEATURE_RESUMING));
-
- if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
- DMU_COMPOUNDSTREAM ||
- drrb->drr_type >= DMU_OST_NUMTYPES ||
- ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
- return (SET_ERROR(EINVAL));
-
- /* Verify pool version supports SA if SA_SPILL feature set */
- if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
- spa_version(dp->dp_spa) < SPA_VERSION_SA)
- return (SET_ERROR(ENOTSUP));
-
- if (drba->drba_cookie->drc_resumable &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EXTENSIBLE_DATASET))
- return (SET_ERROR(ENOTSUP));
-
- /*
- * The receiving code doesn't know how to translate a WRITE_EMBEDDED
- * record to a plain WRITE record, so the pool must have the
- * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
- * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
- */
- if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
- return (SET_ERROR(ENOTSUP));
- if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
- return (SET_ERROR(ENOTSUP));
-
- /*
- * The receiving code doesn't know how to translate large blocks
- * to smaller ones, so the pool must have the LARGE_BLOCKS
- * feature enabled if the stream has LARGE_BLOCKS. Same with
- * large dnodes.
- */
- if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
- return (SET_ERROR(ENOTSUP));
- if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
- return (SET_ERROR(ENOTSUP));
-
- if (featureflags & DMU_BACKUP_FEATURE_RAW) {
- /* raw receives require the encryption feature */
- if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ENCRYPTION))
- return (SET_ERROR(ENOTSUP));
-
- /* embedded data is incompatible with encryption and raw recv */
- if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
- return (SET_ERROR(EINVAL));
- } else {
- dsflags |= DS_HOLD_FLAG_DECRYPT;
- }
-
- error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
- if (error == 0) {
- /* target fs already exists; recv into temp clone */
-
- /* Can't recv a clone into an existing fs */
- if (flags & DRR_FLAG_CLONE || drba->drba_origin) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- error = recv_begin_check_existing_impl(drba, ds, fromguid,
- featureflags);
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- } else if (error == ENOENT) {
- /* target fs does not exist; must be a full backup or clone */
- char buf[ZFS_MAX_DATASET_NAME_LEN];
-
- /*
- * If it's a non-clone incremental, we are missing the
- * target fs, so fail the recv.
- */
- if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
- drba->drba_origin))
- return (SET_ERROR(ENOENT));
-
- /*
- * If we're receiving a full send as a clone, and it doesn't
- * contain all the necessary free records and freeobject
- * records, reject it.
- */
- if (fromguid == 0 && drba->drba_origin &&
- !(flags & DRR_FLAG_FREERECORDS))
- return (SET_ERROR(EINVAL));
-
- /* Open the parent of tofs */
- ASSERT3U(strlen(tofs), <, sizeof (buf));
- (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
- error = dsl_dataset_hold_flags(dp, buf, dsflags, FTAG, &ds);
- if (error != 0)
- return (error);
-
- if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
- drba->drba_origin == NULL) {
- boolean_t will_encrypt;
-
- /*
- * Check that we aren't breaking any encryption rules
- * and that we have all the parameters we need to
- * create an encrypted dataset if necessary. If we are
- * making an encrypted dataset the stream can't have
- * embedded data.
- */
- error = dmu_objset_create_crypt_check(ds->ds_dir,
- drba->drba_dcp, &will_encrypt);
- if (error != 0) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (error);
- }
-
- if (will_encrypt &&
- (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
- }
-
- /*
- * Check filesystem and snapshot limits before receiving. We'll
- * recheck snapshot limits again at the end (we create the
- * filesystems and increment those counts during begin_sync).
- */
- error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
- ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
- if (error != 0) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (error);
- }
-
- error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
- ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
- if (error != 0) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (error);
- }
-
- if (drba->drba_origin != NULL) {
- dsl_dataset_t *origin;
-
- error = dsl_dataset_hold_flags(dp, drba->drba_origin,
- dsflags, FTAG, &origin);
- if (error != 0) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (error);
- }
- if (!origin->ds_is_snapshot) {
- dsl_dataset_rele_flags(origin, dsflags, FTAG);
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
- if (dsl_dataset_phys(origin)->ds_guid != fromguid &&
- fromguid != 0) {
- dsl_dataset_rele_flags(origin, dsflags, FTAG);
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(ENODEV));
- }
- if (origin->ds_dir->dd_crypto_obj != 0 &&
- (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)) {
- dsl_dataset_rele_flags(origin, dsflags, FTAG);
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-			dsl_dataset_rele_flags(origin, dsflags, FTAG);
- }
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- error = 0;
- }
- return (error);
-}
-
-static void
-dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
-{
- dmu_recv_begin_arg_t *drba = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- objset_t *mos = dp->dp_meta_objset;
- struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
- const char *tofs = drba->drba_cookie->drc_tofs;
- uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
- dsl_dataset_t *ds, *newds;
- objset_t *os;
- uint64_t dsobj;
- ds_hold_flags_t dsflags = 0;
- int error;
- uint64_t crflags = 0;
- dsl_crypto_params_t dummy_dcp = { 0 };
- dsl_crypto_params_t *dcp = drba->drba_dcp;
-
- if (drrb->drr_flags & DRR_FLAG_CI_DATA)
- crflags |= DS_FLAG_CI_DATASET;
-
- if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
- dsflags |= DS_HOLD_FLAG_DECRYPT;
-
- /*
- * Raw, non-incremental recvs always use a dummy dcp with
- * the raw cmd set. Raw incremental recvs do not use a dcp
- * since the encryption parameters are already set in stone.
- */
- if (dcp == NULL && drba->drba_snapobj == 0 &&
- drba->drba_origin == NULL) {
- ASSERT3P(dcp, ==, NULL);
- dcp = &dummy_dcp;
-
- if (featureflags & DMU_BACKUP_FEATURE_RAW)
- dcp->cp_cmd = DCP_CMD_RAW_RECV;
- }
-
- error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
- if (error == 0) {
- /* create temporary clone */
- dsl_dataset_t *snap = NULL;
-
- if (drba->drba_snapobj != 0) {
- VERIFY0(dsl_dataset_hold_obj(dp,
- drba->drba_snapobj, FTAG, &snap));
- ASSERT3P(dcp, ==, NULL);
- }
-
- dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
- snap, crflags, drba->drba_cred, dcp, tx);
- if (drba->drba_snapobj != 0)
- dsl_dataset_rele(snap, FTAG);
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- } else {
- dsl_dir_t *dd;
- const char *tail;
- dsl_dataset_t *origin = NULL;
-
- VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
-
- if (drba->drba_origin != NULL) {
- VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
- FTAG, &origin));
- ASSERT3P(dcp, ==, NULL);
- }
-
- /* Create new dataset. */
- dsobj = dsl_dataset_create_sync(dd, strrchr(tofs, '/') + 1,
- origin, crflags, drba->drba_cred, dcp, tx);
- if (origin != NULL)
- dsl_dataset_rele(origin, FTAG);
- dsl_dir_rele(dd, FTAG);
- drba->drba_cookie->drc_newfs = B_TRUE;
- }
-
- VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &newds));
- VERIFY0(dmu_objset_from_ds(newds, &os));
-
- if (drba->drba_cookie->drc_resumable) {
- dsl_dataset_zapify(newds, tx);
- if (drrb->drr_fromguid != 0) {
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_FROMGUID,
- 8, 1, &drrb->drr_fromguid, tx));
- }
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TOGUID,
- 8, 1, &drrb->drr_toguid, tx));
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_TONAME,
- 1, strlen(drrb->drr_toname) + 1, drrb->drr_toname, tx));
- uint64_t one = 1;
- uint64_t zero = 0;
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OBJECT,
- 8, 1, &one, tx));
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_OFFSET,
- 8, 1, &zero, tx));
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_BYTES,
- 8, 1, &zero, tx));
- if (featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) {
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_LARGEBLOCK,
- 8, 1, &one, tx));
- }
- if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) {
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_EMBEDOK,
- 8, 1, &one, tx));
- }
- if (featureflags & DMU_BACKUP_FEATURE_COMPRESSED) {
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_COMPRESSOK,
- 8, 1, &one, tx));
- }
- if (featureflags & DMU_BACKUP_FEATURE_RAW) {
- VERIFY0(zap_add(mos, dsobj, DS_FIELD_RESUME_RAWOK,
- 8, 1, &one, tx));
- }
- }
-
- /*
- * Usually the os->os_encrypted value is tied to the presence of a
- * DSL Crypto Key object in the dd. However, that will not be received
- * until dmu_recv_stream(), so we set the value manually for now.
- */
- if (featureflags & DMU_BACKUP_FEATURE_RAW) {
- os->os_encrypted = B_TRUE;
- drba->drba_cookie->drc_raw = B_TRUE;
- }
-
- dmu_buf_will_dirty(newds->ds_dbuf, tx);
- dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
-
- /*
- * If we actually created a non-clone, we need to create the objset
- * in our new dataset. If this is a raw send we postpone this until
- * dmu_recv_stream() so that we can allocate the metadnode with the
- * properties from the DRR_BEGIN payload.
- */
- rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
- if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
- (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
- (void) dmu_objset_create_impl(dp->dp_spa,
- newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
- }
- rrw_exit(&newds->ds_bp_rwlock, FTAG);
-
- drba->drba_cookie->drc_ds = newds;
-
- spa_history_log_internal_ds(newds, "receive", tx, "");
-}
-
-static int
-dmu_recv_resume_begin_check(void *arg, dmu_tx_t *tx)
-{
- dmu_recv_begin_arg_t *drba = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
- int error;
- ds_hold_flags_t dsflags = 0;
- uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
- dsl_dataset_t *ds;
- const char *tofs = drba->drba_cookie->drc_tofs;
-
- /* already checked */
- ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
- ASSERT(featureflags & DMU_BACKUP_FEATURE_RESUMING);
-
- if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
- DMU_COMPOUNDSTREAM ||
- drrb->drr_type >= DMU_OST_NUMTYPES)
- return (SET_ERROR(EINVAL));
-
- /* Verify pool version supports SA if SA_SPILL feature set */
- if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
- spa_version(dp->dp_spa) < SPA_VERSION_SA)
- return (SET_ERROR(ENOTSUP));
-
- /*
- * The receiving code doesn't know how to translate a WRITE_EMBEDDED
- * record to a plain WRITE record, so the pool must have the
- * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
- * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
- */
- if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
- return (SET_ERROR(ENOTSUP));
- if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
- return (SET_ERROR(ENOTSUP));
-
- /*
- * The receiving code doesn't know how to translate large blocks
- * to smaller ones, so the pool must have the LARGE_BLOCKS
- * feature enabled if the stream has LARGE_BLOCKS. Same with
- * large dnodes.
- */
- if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
- return (SET_ERROR(ENOTSUP));
- if ((featureflags & DMU_BACKUP_FEATURE_LARGE_DNODE) &&
- !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_DNODE))
- return (SET_ERROR(ENOTSUP));
-
- /* 6 extra bytes for /%recv */
- char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
- (void) snprintf(recvname, sizeof (recvname), "%s/%s",
- tofs, recv_clone_name);
-
- if ((featureflags & DMU_BACKUP_FEATURE_RAW) == 0)
- dsflags |= DS_HOLD_FLAG_DECRYPT;
-
- if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
- /* %recv does not exist; continue in tofs */
- error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
- if (error != 0)
- return (error);
- }
-
- /* check that ds is marked inconsistent */
- if (!DS_IS_INCONSISTENT(ds)) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- /* check that there is resuming data, and that the toguid matches */
- if (!dsl_dataset_is_zapified(ds)) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
- uint64_t val;
- error = zap_lookup(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val);
- if (error != 0 || drrb->drr_toguid != val) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Check if the receive is still running. If so, it will be owned.
- * Note that nothing else can own the dataset (e.g. after the receive
- * fails) because it will be marked inconsistent.
- */
- if (dsl_dataset_has_owner(ds)) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EBUSY));
- }
-
- /* There should not be any snapshots of this fs yet. */
- if (ds->ds_prev != NULL && ds->ds_prev->ds_dir == ds->ds_dir) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * Note: resume point will be checked when we process the first WRITE
- * record.
- */
-
-	/* check that the incremental source (fromguid) matches */
- val = 0;
- (void) zap_lookup(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val);
- if (drrb->drr_fromguid != val) {
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (SET_ERROR(EINVAL));
- }
-
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
- return (0);
-}
-
-static void
-dmu_recv_resume_begin_sync(void *arg, dmu_tx_t *tx)
-{
- dmu_recv_begin_arg_t *drba = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- const char *tofs = drba->drba_cookie->drc_tofs;
- struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
- uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
- dsl_dataset_t *ds;
- objset_t *os;
- ds_hold_flags_t dsflags = 0;
- uint64_t dsobj;
- /* 6 extra bytes for /%recv */
- char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
-
- (void) snprintf(recvname, sizeof (recvname), "%s/%s",
- tofs, recv_clone_name);
-
- if (featureflags & DMU_BACKUP_FEATURE_RAW) {
- drba->drba_cookie->drc_raw = B_TRUE;
- } else {
- dsflags |= DS_HOLD_FLAG_DECRYPT;
- }
-
- if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
- /* %recv does not exist; continue in tofs */
- VERIFY0(dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds));
- drba->drba_cookie->drc_newfs = B_TRUE;
- }
-
- /* clear the inconsistent flag so that we can own it */
- ASSERT(DS_IS_INCONSISTENT(ds));
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
- dsobj = ds->ds_object;
- dsl_dataset_rele_flags(ds, dsflags, FTAG);
-
- VERIFY0(dsl_dataset_own_obj(dp, dsobj, dsflags, dmu_recv_tag, &ds));
- VERIFY0(dmu_objset_from_ds(ds, &os));
-
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
-
- rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
- ASSERT(!BP_IS_HOLE(dsl_dataset_get_blkptr(ds)) ||
- drba->drba_cookie->drc_raw);
- rrw_exit(&ds->ds_bp_rwlock, FTAG);
-
- drba->drba_cookie->drc_ds = ds;
-
- spa_history_log_internal_ds(ds, "resume receive", tx, "");
-}
-
-/*
- * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
- * succeeds; otherwise we will leak the holds on the datasets.
- */
-int
-dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
- boolean_t force, boolean_t resumable, nvlist_t *localprops,
- nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc)
-{
- dmu_recv_begin_arg_t drba = { 0 };
-
- bzero(drc, sizeof (dmu_recv_cookie_t));
- drc->drc_drr_begin = drr_begin;
- drc->drc_drrb = &drr_begin->drr_u.drr_begin;
- drc->drc_tosnap = tosnap;
- drc->drc_tofs = tofs;
- drc->drc_force = force;
- drc->drc_resumable = resumable;
- drc->drc_cred = CRED();
- drc->drc_clone = (origin != NULL);
-
- if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
- drc->drc_byteswap = B_TRUE;
- (void) fletcher_4_incremental_byteswap(drr_begin,
- sizeof (dmu_replay_record_t), &drc->drc_cksum);
- byteswap_record(drr_begin);
- } else if (drc->drc_drrb->drr_magic == DMU_BACKUP_MAGIC) {
- (void) fletcher_4_incremental_native(drr_begin,
- sizeof (dmu_replay_record_t), &drc->drc_cksum);
- } else {
- return (SET_ERROR(EINVAL));
- }
-
- drba.drba_origin = origin;
- drba.drba_cookie = drc;
- drba.drba_cred = CRED();
-
- if (DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
- DMU_BACKUP_FEATURE_RESUMING) {
- return (dsl_sync_task(tofs,
- dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
- &drba, 5, ZFS_SPACE_CHECK_NORMAL));
- } else {
- int err;
-
- /*
- * For non-raw, non-incremental, non-resuming receives the
- * user can specify encryption parameters on the command line
- * with "zfs recv -o". For these receives we create a dcp and
- * pass it to the sync task. Creating the dcp will implicitly
- * remove the encryption params from the localprops nvlist,
- * which avoids errors when trying to set these normally
- * read-only properties. Any other kind of receive that
- * attempts to set these properties will fail as a result.
- */
- if ((DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo) &
- DMU_BACKUP_FEATURE_RAW) == 0 &&
- origin == NULL && drc->drc_drrb->drr_fromguid == 0) {
- err = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
- localprops, hidden_args, &drba.drba_dcp);
- if (err != 0)
- return (err);
- }
-
- err = dsl_sync_task(tofs,
- dmu_recv_begin_check, dmu_recv_begin_sync,
- &drba, 5, ZFS_SPACE_CHECK_NORMAL);
- dsl_crypto_params_free(drba.drba_dcp, !!err);
-
- return (err);
- }
-}
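-
-/*
- * Per the note above, a successful dmu_recv_begin() must be paired with
- * dmu_recv_stream(); a sketch of the expected calling pattern (error
- * handling and the dmu_recv_stream() argument list elided):
- *
- *	dmu_recv_cookie_t drc;
- *	err = dmu_recv_begin(tofs, tosnap, drr_begin, force, resumable,
- *	    localprops, hidden_args, origin, &drc);
- *	if (err == 0)
- *		err = dmu_recv_stream(&drc, ...);
- */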
-
-struct receive_record_arg {
- dmu_replay_record_t header;
- void *payload; /* Pointer to a buffer containing the payload */
- /*
- * If the record is a write, pointer to the arc_buf_t containing the
- * payload.
- */
- arc_buf_t *arc_buf;
- int payload_size;
- uint64_t bytes_read; /* bytes read from stream when record created */
- boolean_t eos_marker; /* Marks the end of the stream */
- bqueue_node_t node;
-};
-
-struct receive_writer_arg {
- objset_t *os;
- boolean_t byteswap;
- bqueue_t q;
-
- /*
- * These three args are used to signal to the main thread that we're
- * done.
- */
- kmutex_t mutex;
- kcondvar_t cv;
- boolean_t done;
-
- int err;
- /* A map from guid to dataset to help handle dedup'd streams. */
- avl_tree_t *guid_to_ds_map;
- boolean_t resumable;
- boolean_t raw;
- uint64_t last_object;
- uint64_t last_offset;
- uint64_t max_object; /* highest object ID referenced in stream */
- uint64_t bytes_read; /* bytes read when current record created */
-
- /* Encryption parameters for the last received DRR_OBJECT_RANGE */
- boolean_t or_crypt_params_present;
- uint64_t or_firstobj;
- uint64_t or_numslots;
- uint8_t or_salt[ZIO_DATA_SALT_LEN];
- uint8_t or_iv[ZIO_DATA_IV_LEN];
- uint8_t or_mac[ZIO_DATA_MAC_LEN];
- boolean_t or_byteorder;
-};
-
-struct objlist {
- list_t list; /* List of struct receive_objnode. */
- /*
- * Last object looked up. Used to assert that objects are being looked
- * up in ascending order.
- */
- uint64_t last_lookup;
-};
-
-struct receive_objnode {
- list_node_t node;
- uint64_t object;
-};
-
-struct receive_arg {
- objset_t *os;
- vnode_t *vp; /* The vnode to read the stream from */
- uint64_t voff; /* The current offset in the stream */
- uint64_t bytes_read;
- /*
- * A record that has had its payload read in, but hasn't yet been handed
- * off to the worker thread.
- */
- struct receive_record_arg *rrd;
- /* A record that has had its header read in, but not its payload. */
- struct receive_record_arg *next_rrd;
- zio_cksum_t cksum;
- zio_cksum_t prev_cksum;
- int err;
- boolean_t byteswap;
- boolean_t raw;
- uint64_t featureflags;
- /* Sorted list of objects not to issue prefetches for. */
- struct objlist ignore_objlist;
-};
-
-typedef struct guid_map_entry {
- uint64_t guid;
- boolean_t raw;
- dsl_dataset_t *gme_ds;
- avl_node_t avlnode;
-} guid_map_entry_t;
-
-static int
-guid_compare(const void *arg1, const void *arg2)
-{
- const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
- const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;
-
- return (AVL_CMP(gmep1->guid, gmep2->guid));
-}
-
-static void
-free_guid_map_onexit(void *arg)
-{
- avl_tree_t *ca = arg;
- void *cookie = NULL;
- guid_map_entry_t *gmep;
-
- while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
- dsl_dataset_long_rele(gmep->gme_ds, gmep);
- dsl_dataset_rele_flags(gmep->gme_ds,
- (gmep->raw) ? 0 : DS_HOLD_FLAG_DECRYPT, gmep);
- kmem_free(gmep, sizeof (guid_map_entry_t));
- }
- avl_destroy(ca);
- kmem_free(ca, sizeof (avl_tree_t));
-}
-
-static int
-receive_read(struct receive_arg *ra, int len, void *buf)
-{
- int done = 0;
-
- /*
- * The code doesn't rely on this (lengths being multiples of 8). See
- * comment in dump_bytes.
- */
- ASSERT(len % 8 == 0 ||
- (ra->featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
-
- while (done < len) {
- ssize_t resid;
-
- ra->err = vn_rdwr(UIO_READ, ra->vp,
- (char *)buf + done, len - done,
- ra->voff, UIO_SYSSPACE, FAPPEND,
- RLIM64_INFINITY, CRED(), &resid);
-
- if (resid == len - done) {
- /*
- * Note: ECKSUM indicates that the receive
- * was interrupted and can potentially be resumed.
- */
- ra->err = SET_ERROR(ECKSUM);
- }
- ra->voff += len - done - resid;
- done = len - resid;
- if (ra->err != 0)
- return (ra->err);
- }
-
- ra->bytes_read += len;
-
- ASSERT3U(done, ==, len);
- return (0);
-}
-
-noinline static void
-byteswap_record(dmu_replay_record_t *drr)
-{
-#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
-#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
- drr->drr_type = BSWAP_32(drr->drr_type);
- drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
-
- switch (drr->drr_type) {
- case DRR_BEGIN:
- DO64(drr_begin.drr_magic);
- DO64(drr_begin.drr_versioninfo);
- DO64(drr_begin.drr_creation_time);
- DO32(drr_begin.drr_type);
- DO32(drr_begin.drr_flags);
- DO64(drr_begin.drr_toguid);
- DO64(drr_begin.drr_fromguid);
- break;
- case DRR_OBJECT:
- DO64(drr_object.drr_object);
- DO32(drr_object.drr_type);
- DO32(drr_object.drr_bonustype);
- DO32(drr_object.drr_blksz);
- DO32(drr_object.drr_bonuslen);
- DO32(drr_object.drr_raw_bonuslen);
- DO64(drr_object.drr_toguid);
- DO64(drr_object.drr_maxblkid);
- break;
- case DRR_FREEOBJECTS:
- DO64(drr_freeobjects.drr_firstobj);
- DO64(drr_freeobjects.drr_numobjs);
- DO64(drr_freeobjects.drr_toguid);
- break;
- case DRR_WRITE:
- DO64(drr_write.drr_object);
- DO32(drr_write.drr_type);
- DO64(drr_write.drr_offset);
- DO64(drr_write.drr_logical_size);
- DO64(drr_write.drr_toguid);
- ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
- DO64(drr_write.drr_key.ddk_prop);
- DO64(drr_write.drr_compressed_size);
- break;
- case DRR_WRITE_BYREF:
- DO64(drr_write_byref.drr_object);
- DO64(drr_write_byref.drr_offset);
- DO64(drr_write_byref.drr_length);
- DO64(drr_write_byref.drr_toguid);
- DO64(drr_write_byref.drr_refguid);
- DO64(drr_write_byref.drr_refobject);
- DO64(drr_write_byref.drr_refoffset);
- ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
- drr_key.ddk_cksum);
- DO64(drr_write_byref.drr_key.ddk_prop);
- break;
- case DRR_WRITE_EMBEDDED:
- DO64(drr_write_embedded.drr_object);
- DO64(drr_write_embedded.drr_offset);
- DO64(drr_write_embedded.drr_length);
- DO64(drr_write_embedded.drr_toguid);
- DO32(drr_write_embedded.drr_lsize);
- DO32(drr_write_embedded.drr_psize);
- break;
- case DRR_FREE:
- DO64(drr_free.drr_object);
- DO64(drr_free.drr_offset);
- DO64(drr_free.drr_length);
- DO64(drr_free.drr_toguid);
- break;
- case DRR_SPILL:
- DO64(drr_spill.drr_object);
- DO64(drr_spill.drr_length);
- DO64(drr_spill.drr_toguid);
- DO64(drr_spill.drr_compressed_size);
- DO32(drr_spill.drr_type);
- break;
- case DRR_OBJECT_RANGE:
- DO64(drr_object_range.drr_firstobj);
- DO64(drr_object_range.drr_numslots);
- DO64(drr_object_range.drr_toguid);
- break;
- case DRR_END:
- DO64(drr_end.drr_toguid);
- ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
- break;
- default:
- break;
- }
-
- if (drr->drr_type != DRR_BEGIN) {
- ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
- }
-
-#undef DO64
-#undef DO32
-}
-
-static inline uint8_t
-deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
-{
- if (bonus_type == DMU_OT_SA) {
- return (1);
- } else {
- return (1 +
- ((DN_OLD_MAX_BONUSLEN -
- MIN(DN_OLD_MAX_BONUSLEN, bonus_size)) >> SPA_BLKPTRSHIFT));
- }
-}
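-
-/*
- * For example, with a legacy 512-byte dnode (DN_OLD_MAX_BONUSLEN == 320 and
- * sizeof (blkptr_t) == 128), a zero-length bonus leaves room for
- * 1 + (320 >> SPA_BLKPTRSHIFT) == 3 block pointers, while a full 320-byte
- * bonus (or any DMU_OT_SA bonus) leaves exactly 1.
- */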
-
-static void
-save_resume_state(struct receive_writer_arg *rwa,
- uint64_t object, uint64_t offset, dmu_tx_t *tx)
-{
- int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
-
- if (!rwa->resumable)
- return;
-
- /*
- * We use ds_resume_bytes[] != 0 to indicate that we need to
- * update this on disk, so it must not be 0.
- */
- ASSERT(rwa->bytes_read != 0);
-
- /*
- * We only resume from write records, which have a valid
- * (non-meta-dnode) object number.
- */
- ASSERT(object != 0);
-
- /*
- * For resuming to work correctly, we must receive records in order,
- * sorted by object,offset. This is checked by the callers, but
- * assert it here for good measure.
- */
- ASSERT3U(object, >=, rwa->os->os_dsl_dataset->ds_resume_object[txgoff]);
- ASSERT(object != rwa->os->os_dsl_dataset->ds_resume_object[txgoff] ||
- offset >= rwa->os->os_dsl_dataset->ds_resume_offset[txgoff]);
- ASSERT3U(rwa->bytes_read, >=,
- rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff]);
-
- rwa->os->os_dsl_dataset->ds_resume_object[txgoff] = object;
- rwa->os->os_dsl_dataset->ds_resume_offset[txgoff] = offset;
- rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
-}
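-
-/*
- * The ds_resume_* values stashed here are written out during sync as the
- * DS_FIELD_RESUME_OBJECT/OFFSET/BYTES zap entries that
- * dmu_recv_begin_sync() initialized above, which is how a later resumed
- * receive finds its place in the stream.
- */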
-
-noinline static int
-receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
- void *data)
-{
- dmu_object_info_t doi;
- dmu_tx_t *tx;
- uint64_t object;
- int err;
- uint8_t dn_slots = drro->drr_dn_slots != 0 ?
- drro->drr_dn_slots : DNODE_MIN_SLOTS;
-
- if (drro->drr_type == DMU_OT_NONE ||
- !DMU_OT_IS_VALID(drro->drr_type) ||
- !DMU_OT_IS_VALID(drro->drr_bonustype) ||
- drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
- drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
- P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
- drro->drr_blksz < SPA_MINBLOCKSIZE ||
- drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
- drro->drr_bonuslen >
- DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
- dn_slots >
- (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
- return (SET_ERROR(EINVAL));
- }
-
- if (rwa->raw) {
- /*
- * We should have received a DRR_OBJECT_RANGE record
- * containing this block and stored it in rwa.
- */
- if (drro->drr_object < rwa->or_firstobj ||
- drro->drr_object >= rwa->or_firstobj + rwa->or_numslots ||
- drro->drr_raw_bonuslen < drro->drr_bonuslen ||
- drro->drr_indblkshift > SPA_MAXBLOCKSHIFT ||
- drro->drr_nlevels > DN_MAX_LEVELS ||
- drro->drr_nblkptr > DN_MAX_NBLKPTR ||
- DN_SLOTS_TO_BONUSLEN(dn_slots) <
- drro->drr_raw_bonuslen)
- return (SET_ERROR(EINVAL));
- } else {
- if (drro->drr_flags != 0 || drro->drr_raw_bonuslen != 0 ||
- drro->drr_indblkshift != 0 || drro->drr_nlevels != 0 ||
- drro->drr_nblkptr != 0)
- return (SET_ERROR(EINVAL));
- }
-
- err = dmu_object_info(rwa->os, drro->drr_object, &doi);
- if (err != 0 && err != ENOENT && err != EEXIST)
- return (SET_ERROR(EINVAL));
-
- if (drro->drr_object > rwa->max_object)
- rwa->max_object = drro->drr_object;
-
- /*
-	 * If we are losing blkptrs or changing the block size, this must
- * be a new file instance. We must clear out the previous file
- * contents before we can change this type of metadata in the dnode.
- * Raw receives will also check that the indirect structure of the
- * dnode hasn't changed.
- */
- if (err == 0) {
- uint32_t indblksz = drro->drr_indblkshift ?
- 1ULL << drro->drr_indblkshift : 0;
- int nblkptr = deduce_nblkptr(drro->drr_bonustype,
- drro->drr_bonuslen);
-
- object = drro->drr_object;
-
- /* nblkptr will be bounded by the bonus size and type */
- if (rwa->raw && nblkptr != drro->drr_nblkptr)
- return (SET_ERROR(EINVAL));
-
- if (drro->drr_blksz != doi.doi_data_block_size ||
- nblkptr < doi.doi_nblkptr ||
- dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
- (rwa->raw &&
- (indblksz != doi.doi_metadata_block_size ||
- drro->drr_nlevels < doi.doi_indirection))) {
- err = dmu_free_long_range(rwa->os,
- drro->drr_object, 0, DMU_OBJECT_END);
- if (err != 0)
- return (SET_ERROR(EINVAL));
- }
-
- /*
- * The dmu does not currently support decreasing nlevels
- * on an object. For non-raw sends, this does not matter
- * and the new object can just use the previous one's nlevels.
- * For raw sends, however, the structure of the received dnode
- * (including nlevels) must match that of the send side.
- * Therefore, instead of using dmu_object_reclaim(), we must
- * free the object completely and call dmu_object_claim_dnsize()
- * instead.
- */
- if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
- dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
- err = dmu_free_long_object(rwa->os, drro->drr_object);
- if (err != 0)
- return (SET_ERROR(EINVAL));
-
- txg_wait_synced(dmu_objset_pool(rwa->os), 0);
- object = DMU_NEW_OBJECT;
- }
- } else if (err == EEXIST) {
- /*
- * The object requested is currently an interior slot of a
- * multi-slot dnode. This will be resolved when the next txg
- * is synced out, since the send stream will have told us
- * to free this slot when we freed the associated dnode
- * earlier in the stream.
- */
- txg_wait_synced(dmu_objset_pool(rwa->os), 0);
- object = drro->drr_object;
- } else {
- /* object is free and we are about to allocate a new one */
- object = DMU_NEW_OBJECT;
- }
-
- /*
- * If this is a multi-slot dnode there is a chance that this
- * object will expand into a slot that is already used by
- * another object from the previous snapshot. We must free
- * these objects before we attempt to allocate the new dnode.
- */
- if (dn_slots > 1) {
- boolean_t need_sync = B_FALSE;
-
- for (uint64_t slot = drro->drr_object + 1;
- slot < drro->drr_object + dn_slots;
- slot++) {
- dmu_object_info_t slot_doi;
-
- err = dmu_object_info(rwa->os, slot, &slot_doi);
- if (err == ENOENT || err == EEXIST)
- continue;
- else if (err != 0)
- return (err);
-
- err = dmu_free_long_object(rwa->os, slot);
-
- if (err != 0)
- return (err);
-
- need_sync = B_TRUE;
- }
-
- if (need_sync)
- txg_wait_synced(dmu_objset_pool(rwa->os), 0);
- }
+/*
+ * Pop the new data off the queue, check that the records we receive are in
+ * the right order, but do not free the old data. This is used so that the
+ * records can be sent on to the main thread without copying the data.
+ */
+static struct send_range *
+get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
+{
+ struct send_range *next = bqueue_dequeue(bq);
+ ASSERT3S(send_range_after(prev, next), ==, -1);
+ return (next);
+}
- tx = dmu_tx_create(rwa->os);
- dmu_tx_hold_bonus(tx, object);
- dmu_tx_hold_write(tx, object, 0, 0);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
- }
+/*
+ * Pop the new data off the queue, check that the records we receive are in
+ * the right order, and free the old data.
+ */
+static struct send_range *
+get_next_range(bqueue_t *bq, struct send_range *prev)
+{
+ struct send_range *next = get_next_range_nofree(bq, prev);
+ range_free(prev);
+ return (next);
+}
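+
+/*
+ * A sketch of the consumer pattern these two helpers support (the
+ * hypothetical process_range() stands in for the real work; error and
+ * cancellation handling elided):
+ *
+ *	struct send_range *range = bqueue_dequeue(&q);
+ *	while (!range->eos_marker) {
+ *		process_range(range);
+ *		range = get_next_range(&q, range);
+ *	}
+ *	range_free(range);
+ */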
- if (object == DMU_NEW_OBJECT) {
- /* currently free, want to be allocated */
- err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
- drro->drr_bonustype, drro->drr_bonuslen,
- dn_slots << DNODE_SHIFT, tx);
- } else if (drro->drr_type != doi.doi_type ||
- drro->drr_blksz != doi.doi_data_block_size ||
- drro->drr_bonustype != doi.doi_bonus_type ||
- drro->drr_bonuslen != doi.doi_bonus_size) {
- /* currently allocated, but with different properties */
- err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
- drro->drr_bonustype, drro->drr_bonuslen,
- dn_slots << DNODE_SHIFT, tx);
- }
- if (err != 0) {
- dmu_tx_commit(tx);
- return (SET_ERROR(EINVAL));
+static __attribute__((noreturn)) void
+redact_list_thread(void *arg)
+{
+ struct redact_list_thread_arg *rlt_arg = arg;
+ struct send_range *record;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
+ if (rlt_arg->rl != NULL) {
+ struct redact_list_cb_arg rlcba = {0};
+ rlcba.cancel = &rlt_arg->cancel;
+ rlcba.q = &rlt_arg->q;
+ rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
+ rlcba.mark_redact = rlt_arg->mark_redact;
+ int err = dsl_redaction_list_traverse(rlt_arg->rl,
+ &rlt_arg->resume, redact_list_cb, &rlcba);
+ if (err != EINTR)
+ rlt_arg->error_code = err;
}
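+	/* Signal completion to the consumer with an end-of-stream range. */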
+ record = range_alloc(DATA, 0, 0, 0, B_TRUE);
+ bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
+ spl_fstrans_unmark(cookie);
- if (rwa->or_crypt_params_present) {
- /*
- * Set the crypt params for the buffer associated with this
- * range of dnodes. This causes the blkptr_t to have the
- * same crypt params (byteorder, salt, iv, mac) as on the
- * sending side.
- *
- * Since we are committing this tx now, it is possible for
- * the dnode block to end up on-disk with the incorrect MAC,
- * if subsequent objects in this block are received in a
- * different txg. However, since the dataset is marked as
- * inconsistent, no code paths will do a non-raw read (or
- * decrypt the block / verify the MAC). The receive code and
- * scrub code can safely do raw reads and verify the
- * checksum. They don't need to verify the MAC.
- */
- dmu_buf_t *db = NULL;
- uint64_t offset = rwa->or_firstobj * DNODE_MIN_SIZE;
-
- err = dmu_buf_hold_by_dnode(DMU_META_DNODE(rwa->os),
- offset, FTAG, &db, DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
- if (err != 0) {
- dmu_tx_commit(tx);
- return (SET_ERROR(EINVAL));
- }
+ thread_exit();
+}
- dmu_buf_set_crypt_params(db, rwa->or_byteorder,
- rwa->or_salt, rwa->or_iv, rwa->or_mac, tx);
+/*
+ * Compare the start points of the two provided ranges. End-of-stream ranges
+ * compare last. At the same start point, a multi-object hole (OBJECT_RANGE)
+ * compares before an OBJECT record, which compares before any data or hole
+ * inside that object.
+ */
+static int
+send_range_start_compare(struct send_range *r1, struct send_range *r2)
+{
+ uint64_t r1_objequiv = r1->object;
+ uint64_t r1_l0equiv = r1->start_blkid;
+ uint64_t r2_objequiv = r2->object;
+ uint64_t r2_l0equiv = r2->start_blkid;
+ int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
+ if (unlikely(cmp))
+ return (cmp);
+ if (r1->object == 0) {
+ r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
+ r1_l0equiv = 0;
+ }
+ if (r2->object == 0) {
+ r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
+ r2_l0equiv = 0;
+ }
+
+ cmp = TREE_CMP(r1_objequiv, r2_objequiv);
+ if (likely(cmp))
+ return (cmp);
+ cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
+ if (unlikely(cmp))
+ return (cmp);
+ cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
+ if (unlikely(cmp))
+ return (cmp);
+
+ return (TREE_CMP(r1_l0equiv, r2_l0equiv));
+}
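+
+/*
+ * For example, with the default 32 dnodes per dnode block, an OBJECT_RANGE
+ * for the dnode block covering objects 64-95, the OBJECT record for object
+ * 64, and a DATA range starting at block 0 of object 64 all share a start
+ * point, and sort in exactly that order.
+ */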
- dmu_buf_rele(db, FTAG);
+enum q_idx {
+ REDACT_IDX = 0,
+ TO_IDX,
+ FROM_IDX,
+ NUM_THREADS
+};
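+
+/*
+ * The enum order above doubles as a priority order for find_next_range()
+ * below: when ranges overlap, metadata from a lower-indexed queue overrules
+ * that from a higher-indexed one, so redaction ranges take precedence over
+ * the to thread's ranges, which take precedence over the from thread's.
+ */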
- rwa->or_crypt_params_present = B_FALSE;
+/*
+ * This function returns the next range the send_merge_thread should operate on.
+ * The inputs are two arrays; the first one stores the range at the front of the
+ * queues stored in the second one. The ranges are sorted in descending
+ * priority order; the metadata from earlier ranges overrules metadata from
+ * later ranges. out_mask is used to return which threads the ranges came from;
+ * bit i is set if ranges[i] started at the same place as the returned range.
+ *
+ * This code is not hardcoded to compare a specific number of threads; it could
+ * be used with any number, just by changing the q_idx enum.
+ *
+ * The "next range" is the one with the earliest start; if two starts are equal,
+ * the highest-priority range is the next to operate on. If a higher-priority
+ * range starts in the middle of the first range, then the first range will be
+ * truncated to end where the higher-priority range starts, and we will operate
+ * on that one next time. In this way, we make sure that each block covered by
+ * some range gets covered by a returned range, and each block covered is
+ * returned using the metadata of the highest-priority range it appears in.
+ *
+ * For example, if the three ranges at the front of the queues were [2,4),
+ * [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
+ * from the third range, [2,4) with the metadata from the first range, and then
+ * [4,5) with the metadata from the second.
+ */
+static struct send_range *
+find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
+{
+	int idx = 0; /* index of the range with the earliest start */
+ int i;
+ uint64_t bmask = 0;
+ for (i = 1; i < NUM_THREADS; i++) {
+ if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
+ idx = i;
+ }
+ if (ranges[idx]->eos_marker) {
+ struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
+ *out_mask = 0;
+ return (ret);
}
-
- dmu_object_set_checksum(rwa->os, drro->drr_object,
- drro->drr_checksumtype, tx);
- dmu_object_set_compress(rwa->os, drro->drr_object,
- drro->drr_compress, tx);
-
- /* handle more restrictive dnode structuring for raw recvs */
- if (rwa->raw) {
- /*
- * Set the indirect block shift and nlevels. This will not fail
- * because we ensured all of the blocks were free earlier if
- * this is a new object.
- */
- VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
- drro->drr_blksz, drro->drr_indblkshift, tx));
- VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
- drro->drr_nlevels, tx));
- VERIFY0(dmu_object_set_maxblkid(rwa->os, drro->drr_object,
- drro->drr_maxblkid, tx));
+ /*
+ * Find all the ranges that start at that same point.
+ */
+ for (i = 0; i < NUM_THREADS; i++) {
+ if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
+ bmask |= 1 << i;
}
-
- if (data != NULL) {
- dmu_buf_t *db;
- uint32_t flags = DMU_READ_NO_PREFETCH;
-
- if (rwa->raw)
- flags |= DMU_READ_NO_DECRYPT;
-
- VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
- FTAG, flags, &db));
- dmu_buf_will_dirty(db, tx);
-
- ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
- bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
-
- /*
- * Raw bonus buffers have their byteorder determined by the
- * DRR_OBJECT_RANGE record.
- */
- if (rwa->byteswap && !rwa->raw) {
- dmu_object_byteswap_t byteswap =
- DMU_OT_BYTESWAP(drro->drr_bonustype);
- dmu_ot_byteswap[byteswap].ob_func(db->db_data,
- DRR_OBJECT_PAYLOAD_SIZE(drro));
- }
- dmu_buf_rele(db, FTAG);
+ *out_mask = bmask;
+ /*
+ * OBJECT_RANGE records only come from the TO thread, and should always
+ * be treated as overlapping with nothing and sent on immediately. They
+ * are only used in raw sends, and are never redacted.
+ */
+ if (ranges[idx]->type == OBJECT_RANGE) {
+ ASSERT3U(idx, ==, TO_IDX);
+ ASSERT3U(*out_mask, ==, 1 << TO_IDX);
+ struct send_range *ret = ranges[idx];
+ ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
+ return (ret);
}
- dmu_tx_commit(tx);
-
- return (0);
-}
-
-/* ARGSUSED */
-noinline static int
-receive_freeobjects(struct receive_writer_arg *rwa,
- struct drr_freeobjects *drrfo)
-{
- uint64_t obj;
- int next_err = 0;
-
- if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
- return (SET_ERROR(EINVAL));
-
- for (obj = drrfo->drr_firstobj == 0 ? 1 : drrfo->drr_firstobj;
- obj < drrfo->drr_firstobj + drrfo->drr_numobjs && next_err == 0;
- next_err = dmu_object_next(rwa->os, &obj, FALSE, 0)) {
- dmu_object_info_t doi;
- int err;
-
- err = dmu_object_info(rwa->os, obj, &doi);
- if (err == ENOENT)
+ /*
+ * Find the first start or end point after the start of the first range.
+ */
+ uint64_t first_change = ranges[idx]->end_blkid;
+ for (i = 0; i < NUM_THREADS; i++) {
+ if (i == idx || ranges[i]->eos_marker ||
+ ranges[i]->object > ranges[idx]->object ||
+ ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
- else if (err != 0)
- return (err);
-
- err = dmu_free_long_object(rwa->os, obj);
-
- if (err != 0)
- return (err);
-
- if (obj > rwa->max_object)
- rwa->max_object = obj;
+ ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
+ if (first_change > ranges[i]->start_blkid &&
+ (bmask & (1 << i)) == 0)
+ first_change = ranges[i]->start_blkid;
+ else if (first_change > ranges[i]->end_blkid)
+ first_change = ranges[i]->end_blkid;
}
- if (next_err != ESRCH)
- return (next_err);
- return (0);
-}
-
-noinline static int
-receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
- arc_buf_t *abuf)
-{
- int err;
- dmu_tx_t *tx;
- dnode_t *dn;
-
- if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
- !DMU_OT_IS_VALID(drrw->drr_type))
- return (SET_ERROR(EINVAL));
-
/*
- * For resuming to work, records must be in increasing order
- * by (object, offset).
+ * Update all ranges to no longer overlap with the range we're
+ * returning. All such ranges must start at the same place as the range
+ * being returned, and end at or after first_change. Thus we update
+ * their start to first_change. If that makes them size 0, then free
+ * them and pull a new range from that thread.
*/
- if (drrw->drr_object < rwa->last_object ||
- (drrw->drr_object == rwa->last_object &&
- drrw->drr_offset < rwa->last_offset)) {
- return (SET_ERROR(EINVAL));
- }
- rwa->last_object = drrw->drr_object;
- rwa->last_offset = drrw->drr_offset;
-
- if (rwa->last_object > rwa->max_object)
- rwa->max_object = rwa->last_object;
-
- if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
- return (SET_ERROR(EINVAL));
-
- tx = dmu_tx_create(rwa->os);
- dmu_tx_hold_write(tx, drrw->drr_object,
- drrw->drr_offset, drrw->drr_logical_size);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
+ for (i = 0; i < NUM_THREADS; i++) {
+ if (i == idx || (bmask & (1 << i)) == 0)
+ continue;
+ ASSERT3U(first_change, >, ranges[i]->start_blkid);
+ ranges[i]->start_blkid = first_change;
+ ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
+ if (ranges[i]->start_blkid == ranges[i]->end_blkid)
+ ranges[i] = get_next_range(qs[i], ranges[i]);
}
-
- if (rwa->byteswap && !arc_is_encrypted(abuf) &&
- arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
- dmu_object_byteswap_t byteswap =
- DMU_OT_BYTESWAP(drrw->drr_type);
- dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
- DRR_WRITE_PAYLOAD_SIZE(drrw));
+ /*
+ * Short-circuit the simple case; if the range doesn't overlap with
+ * anything else, or it only overlaps with things that start at the same
+ * place and are longer, send it on.
+ */
+ if (first_change == ranges[idx]->end_blkid) {
+ struct send_range *ret = ranges[idx];
+ ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
+ return (ret);
}
- VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
- dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
- dnode_rele(dn, FTAG);
-
/*
- * Note: If the receive fails, we want the resume stream to start
- * with the same record that we last successfully received (as opposed
- * to the next record), so that we can verify that we are
- * resuming from the correct location.
+ * Otherwise, return a truncated copy of ranges[idx] and move the start
+ * of ranges[idx] back to first_change.
*/
- save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
- dmu_tx_commit(tx);
-
- return (0);
+ struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
+ *ret = *ranges[idx];
+ ret->end_blkid = first_change;
+ ranges[idx]->start_blkid = first_change;
+ return (ret);
}
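+
+/*
+ * An illustrative example of the logic above, with hypothetical block
+ * numbers: suppose one thread's front range covers blocks [100, 200) of
+ * an object and another thread's covers [100, 140) of the same object.
+ * Both bits are set in *out_mask and first_change is 140, so the caller
+ * gets back a range covering exactly [100, 140); the remainder
+ * [140, 200) remains as that thread's front range for a later call. The
+ * mask tells send_merge_thread which threads saw the block; for example,
+ * a range carrying both the FROM and REDACT bits is dropped there as
+ * already redacted.
+ */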
+#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
+
/*
- * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
- * streams to refer to a copy of the data that is already on the
- * system because it came in earlier in the stream. This function
- * finds the earlier copy of the data, and uses that copy instead of
- * data from the stream to fulfill this write.
+ * Merge the results from the from thread and the to thread, and then hand the
+ * records off to send_reader_thread to prefetch them. If this is not a
+ * send from a redaction bookmark, the from thread will push an end of stream
+ * record and stop, and we'll just send everything that was changed in the
+ * to_ds since the ancestor's creation txg. If it is, then since
+ * traverse_dataset has a canonical order, we can compare each change as
+ * they're pulled off the queues. That will give us a stream that is
+ * appropriately sorted, and covers all records. In addition, we pull the
+ * data from the redact_list_thread and use that to determine which blocks
+ * should be redacted.
*/
-static int
-receive_write_byref(struct receive_writer_arg *rwa,
- struct drr_write_byref *drrwbr)
+static __attribute__((noreturn)) void
+send_merge_thread(void *arg)
{
- dmu_tx_t *tx;
- int err;
- guid_map_entry_t gmesrch;
- guid_map_entry_t *gmep;
- avl_index_t where;
- objset_t *ref_os = NULL;
- int flags = DMU_READ_PREFETCH;
- dmu_buf_t *dbp;
-
- if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
- return (SET_ERROR(EINVAL));
+ struct send_merge_thread_arg *smt_arg = arg;
+ struct send_range *front_ranges[NUM_THREADS];
+ bqueue_t *queues[NUM_THREADS];
+ int err = 0;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
- /*
- * If the GUID of the referenced dataset is different from the
- * GUID of the target dataset, find the referenced dataset.
- */
- if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
- gmesrch.guid = drrwbr->drr_refguid;
- if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
- &where)) == NULL) {
- return (SET_ERROR(EINVAL));
- }
- if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
- return (SET_ERROR(EINVAL));
+ if (smt_arg->redact_arg == NULL) {
+ front_ranges[REDACT_IDX] =
+ kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
+ front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
+ front_ranges[REDACT_IDX]->type = REDACT;
+ queues[REDACT_IDX] = NULL;
} else {
- ref_os = rwa->os;
- }
-
- if (drrwbr->drr_object > rwa->max_object)
- rwa->max_object = drrwbr->drr_object;
-
- if (rwa->raw)
- flags |= DMU_READ_NO_DECRYPT;
-
- /* may return either a regular db or an encrypted one */
- err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
- drrwbr->drr_refoffset, FTAG, &dbp, flags);
- if (err != 0)
- return (err);
-
- tx = dmu_tx_create(rwa->os);
+ front_ranges[REDACT_IDX] =
+ bqueue_dequeue(&smt_arg->redact_arg->q);
+ queues[REDACT_IDX] = &smt_arg->redact_arg->q;
+ }
+ front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
+ queues[TO_IDX] = &smt_arg->to_arg->q;
+ front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
+ queues[FROM_IDX] = &smt_arg->from_arg->q;
+ uint64_t mask = 0;
+ struct send_range *range;
+ for (range = find_next_range(front_ranges, queues, &mask);
+ !range->eos_marker && err == 0 && !smt_arg->cancel;
+ range = find_next_range(front_ranges, queues, &mask)) {
+ /*
+ * If the range in question was in both the from redact bookmark
+ * and the bookmark we're using to redact, then don't send it.
+ * It's already redacted on the receiving system, so a redaction
+ * record would be redundant.
+ */
+ if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
+ ASSERT3U(range->type, ==, REDACT);
+ range_free(range);
+ continue;
+ }
+ bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
- dmu_tx_hold_write(tx, drrwbr->drr_object,
- drrwbr->drr_offset, drrwbr->drr_length);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
+ if (smt_arg->to_arg->error_code != 0) {
+ err = smt_arg->to_arg->error_code;
+ } else if (smt_arg->from_arg->error_code != 0) {
+ err = smt_arg->from_arg->error_code;
+ } else if (smt_arg->redact_arg != NULL &&
+ smt_arg->redact_arg->error_code != 0) {
+ err = smt_arg->redact_arg->error_code;
+ }
}
-
- if (rwa->raw) {
- dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
- drrwbr->drr_offset, dbp, tx);
- } else {
- dmu_write(rwa->os, drrwbr->drr_object,
- drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
+ if (smt_arg->cancel && err == 0)
+ err = SET_ERROR(EINTR);
+ smt_arg->error = err;
+ if (smt_arg->error != 0) {
+ smt_arg->to_arg->cancel = B_TRUE;
+ smt_arg->from_arg->cancel = B_TRUE;
+ if (smt_arg->redact_arg != NULL)
+ smt_arg->redact_arg->cancel = B_TRUE;
}
- dmu_buf_rele(dbp, FTAG);
-
- /* See comment in restore_write. */
- save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
- dmu_tx_commit(tx);
- return (0);
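+	/*
+	 * Drain each input queue to its EOS marker so the producer threads
+	 * can finish enqueueing and exit, then free the markers themselves.
+	 */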
+ for (int i = 0; i < NUM_THREADS; i++) {
+ while (!front_ranges[i]->eos_marker) {
+ front_ranges[i] = get_next_range(queues[i],
+ front_ranges[i]);
+ }
+ range_free(front_ranges[i]);
+ }
+ range->eos_marker = B_TRUE;
+ bqueue_enqueue_flush(&smt_arg->q, range, 1);
+ spl_fstrans_unmark(cookie);
+ thread_exit();
}
-static int
-receive_write_embedded(struct receive_writer_arg *rwa,
- struct drr_write_embedded *drrwe, void *data)
-{
- dmu_tx_t *tx;
- int err;
-
- if (drrwe->drr_offset + drrwe->drr_length < drrwe->drr_offset)
- return (SET_ERROR(EINVAL));
-
- if (drrwe->drr_psize > BPE_PAYLOAD_SIZE)
- return (SET_ERROR(EINVAL));
-
- if (drrwe->drr_etype >= NUM_BP_EMBEDDED_TYPES)
- return (SET_ERROR(EINVAL));
- if (drrwe->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
- return (SET_ERROR(EINVAL));
- if (rwa->raw)
- return (SET_ERROR(EINVAL));
-
- if (drrwe->drr_object > rwa->max_object)
- rwa->max_object = drrwe->drr_object;
+struct send_reader_thread_arg {
+ struct send_merge_thread_arg *smta;
+ bqueue_t q;
+ boolean_t cancel;
+ boolean_t issue_reads;
+ uint64_t featureflags;
+ int error;
+};
- tx = dmu_tx_create(rwa->os);
+static void
+dmu_send_read_done(zio_t *zio)
+{
+ struct send_range *range = zio->io_private;
- dmu_tx_hold_write(tx, drrwe->drr_object,
- drrwe->drr_offset, drrwe->drr_length);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
- return (err);
+ mutex_enter(&range->sru.data.lock);
+ if (zio->io_error != 0) {
+ abd_free(range->sru.data.abd);
+ range->sru.data.abd = NULL;
+ range->sru.data.io_err = zio->io_error;
}
- dmu_write_embedded(rwa->os, drrwe->drr_object,
- drrwe->drr_offset, data, drrwe->drr_etype,
- drrwe->drr_compression, drrwe->drr_lsize, drrwe->drr_psize,
- rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
-
- /* See comment in restore_write. */
- save_resume_state(rwa, drrwe->drr_object, drrwe->drr_offset, tx);
- dmu_tx_commit(tx);
- return (0);
+ ASSERT(range->sru.data.io_outstanding);
+ range->sru.data.io_outstanding = B_FALSE;
+ cv_broadcast(&range->sru.data.cv);
+ mutex_exit(&range->sru.data.lock);
}
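+
+/*
+ * A minimal sketch of the consumer side that pairs with the completion
+ * callback above (the actual wait loop lives with the main send thread's
+ * record writer):
+ *
+ *	mutex_enter(&range->sru.data.lock);
+ *	while (range->sru.data.io_outstanding)
+ *		cv_wait(&range->sru.data.cv, &range->sru.data.lock);
+ *	err = range->sru.data.io_err;
+ *	mutex_exit(&range->sru.data.lock);
+ */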
-static int
-receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
- arc_buf_t *abuf)
+static void
+issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
- dmu_tx_t *tx;
- dmu_buf_t *db, *db_spill;
- int err;
- uint32_t flags = 0;
-
- if (drrs->drr_length < SPA_MINBLOCKSIZE ||
- drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
- return (SET_ERROR(EINVAL));
-
- if (rwa->raw) {
- if (!DMU_OT_IS_VALID(drrs->drr_type) ||
- drrs->drr_compressiontype >= ZIO_COMPRESS_FUNCTIONS ||
- drrs->drr_compressed_size == 0)
- return (SET_ERROR(EINVAL));
-
- flags |= DMU_READ_NO_DECRYPT;
- }
+ struct srd *srdp = &range->sru.data;
+ blkptr_t *bp = &srdp->bp;
+ objset_t *os = srta->smta->os;
- if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
- return (SET_ERROR(EINVAL));
+ ASSERT3U(range->type, ==, DATA);
+ ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
+ /*
+ * If we have large blocks stored on disk but
+ * the send flags don't allow us to send large
+ * blocks, we split the data from the arc buf
+ * into chunks.
+ */
+ boolean_t split_large_blocks =
+ srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
+ !(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
+ /*
+ * We should only request compressed data from the ARC if all
+ * the following are true:
+ * - stream compression was requested
+ * - we aren't splitting large blocks into smaller chunks
+ * - the data won't need to be byteswapped before sending
+ * - this isn't an embedded block
+ * - this isn't metadata (if receiving on a different endian
+ * system it can be byteswapped more easily)
+ */
+ boolean_t request_compressed =
+ (srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
+ !split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
+ !BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
- if (drrs->drr_object > rwa->max_object)
- rwa->max_object = drrs->drr_object;
+ zio_flag_t zioflags = ZIO_FLAG_CANFAIL;
- VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
- if ((err = dmu_spill_hold_by_bonus(db, DMU_READ_NO_DECRYPT, FTAG,
- &db_spill)) != 0) {
- dmu_buf_rele(db, FTAG);
- return (err);
+ if (srta->featureflags & DMU_BACKUP_FEATURE_RAW) {
+ zioflags |= ZIO_FLAG_RAW;
+ srdp->io_compressed = B_TRUE;
+ } else if (request_compressed) {
+ zioflags |= ZIO_FLAG_RAW_COMPRESS;
+ srdp->io_compressed = B_TRUE;
}
- tx = dmu_tx_create(rwa->os);
+ srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
+ BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
- dmu_tx_hold_spill(tx, db->db_object);
+ if (!srta->issue_reads)
+ return;
+ if (BP_IS_REDACTED(bp))
+ return;
+ if (send_do_embed(bp, srta->featureflags))
+ return;
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_buf_rele(db, FTAG);
- dmu_buf_rele(db_spill, FTAG);
- dmu_tx_abort(tx);
- return (err);
- }
+ zbookmark_phys_t zb = {
+ .zb_objset = dmu_objset_id(os),
+ .zb_object = range->object,
+ .zb_level = 0,
+ .zb_blkid = range->start_blkid,
+ };
- if (db_spill->db_size < drrs->drr_length)
- VERIFY(0 == dbuf_spill_set_blksz(db_spill,
- drrs->drr_length, tx));
+ arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
- if (rwa->byteswap && !arc_is_encrypted(abuf) &&
- arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
- dmu_object_byteswap_t byteswap =
- DMU_OT_BYTESWAP(drrs->drr_type);
- dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
- DRR_SPILL_PAYLOAD_SIZE(drrs));
+ int arc_err = arc_read(NULL, os->os_spa, bp,
+ arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
+ zioflags, &aflags, &zb);
+ /*
+ * If the data is not already cached in the ARC, we read directly
+ * from zio. This avoids the performance overhead of adding a new
+ * entry to the ARC, and we also avoid polluting the ARC cache with
+ * data that is not likely to be used in the future.
+ */
+ if (arc_err != 0) {
+ srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
+ srdp->io_outstanding = B_TRUE;
+ zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
+ srdp->datasz, dmu_send_read_done, range,
+ ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
-
- dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
-
- dmu_buf_rele(db, FTAG);
- dmu_buf_rele(db_spill, FTAG);
-
- dmu_tx_commit(tx);
- return (0);
}
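+
+/*
+ * Summary of the flag selection above: a raw send reads the block as
+ * stored on disk (ZIO_FLAG_RAW, which includes ZIO_FLAG_RAW_COMPRESS), a
+ * compressed send reads the compressed payload (ZIO_FLAG_RAW_COMPRESS),
+ * and anything else reads the logical data. Correspondingly,
+ * srdp->datasz is BP_GET_PSIZE(bp) in the first two cases and
+ * BP_GET_LSIZE(bp) otherwise.
+ */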
-/* ARGSUSED */
-noinline static int
-receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
+/*
+ * Create a new record with the given values.
+ */
+static void
+enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
+ uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
- int err;
-
- if (drrf->drr_length != DMU_OBJECT_END &&
- drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
- return (SET_ERROR(EINVAL));
+ enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
+ (BP_IS_REDACTED(bp) ? REDACT : DATA));
- if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
- return (SET_ERROR(EINVAL));
-
- if (drrf->drr_object > rwa->max_object)
- rwa->max_object = drrf->drr_object;
+ struct send_range *range = range_alloc(range_type, dn->dn_object,
+ blkid, blkid + count, B_FALSE);
- err = dmu_free_long_range(rwa->os, drrf->drr_object,
- drrf->drr_offset, drrf->drr_length);
+ if (blkid == DMU_SPILL_BLKID) {
+ ASSERT3P(bp, !=, NULL);
+ ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
+ }
- return (err);
+ switch (range_type) {
+ case HOLE:
+ range->sru.hole.datablksz = datablksz;
+ break;
+ case DATA:
+ ASSERT3U(count, ==, 1);
+ range->sru.data.datablksz = datablksz;
+ range->sru.data.obj_type = dn->dn_type;
+ range->sru.data.bp = *bp;
+ issue_data_read(srta, range);
+ break;
+ case REDACT:
+ range->sru.redact.datablksz = datablksz;
+ break;
+ default:
+ break;
+ }
+ bqueue_enqueue(q, range, datablksz);
}
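+
+/*
+ * Typical uses from the reader thread below: a multi-block hole is
+ * enqueued with a single call, while data blocks are enqueued one at a
+ * time so that each range carries its own block pointer:
+ *
+ *	enqueue_range(srta, outq, dn, blkid, nblks, NULL, datablksz);
+ *	enqueue_range(srta, outq, dn, blkid, 1, &bp, datablksz);
+ */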
-static int
-receive_object_range(struct receive_writer_arg *rwa,
- struct drr_object_range *drror)
+/*
+ * This thread is responsible for two things: First, it retrieves the correct
+ * blkptr in the to ds if we need to send the data because of something from
+ * the from thread. As a result of this, we're the first ones to discover that
+ * some indirect blocks can be discarded because they're not holes. Second,
+ * it issues prefetches for the data we need to send.
+ */
+static __attribute__((noreturn)) void
+send_reader_thread(void *arg)
{
- /*
- * By default, we assume this block is in our native format
- * (ZFS_HOST_BYTEORDER). We then take into account whether
- * the send stream is byteswapped (rwa->byteswap). Finally,
- * we need to byteswap again if this particular block was
- * in non-native format on the send side.
- */
- boolean_t byteorder = ZFS_HOST_BYTEORDER ^ rwa->byteswap ^
- !!DRR_IS_RAW_BYTESWAPPED(drror->drr_flags);
+ struct send_reader_thread_arg *srta = arg;
+ struct send_merge_thread_arg *smta = srta->smta;
+ bqueue_t *inq = &smta->q;
+ bqueue_t *outq = &srta->q;
+ objset_t *os = smta->os;
+ fstrans_cookie_t cookie = spl_fstrans_mark();
+ struct send_range *range = bqueue_dequeue(inq);
+ int err = 0;
/*
- * Since dnode block sizes are constant, we should not need to worry
- * about making sure that the dnode block size is the same on the
- * sending and receiving sides for the time being. For non-raw sends,
- * this does not matter (and in fact we do not send a DRR_OBJECT_RANGE
- * record at all). Raw sends require this record type because the
- * encryption parameters are used to protect an entire block of bonus
- * buffers. If the size of dnode blocks ever becomes variable,
- * handling will need to be added to ensure that dnode block sizes
- * match on the sending and receiving side.
+ * If the record we're analyzing is from a redaction bookmark from the
+	 * from ds, then we need to know whether or not it exists in the to ds
+	 * so we know whether to create records for it or not. If it does, we need
+ * we know whether to create records for it or not. If it does, we need
+ * the datablksz so we can generate an appropriate record for it.
+ * Finally, if it isn't redacted, we need the blkptr so that we can send
+ * a WRITE record containing the actual data.
*/
- if (drror->drr_numslots != DNODES_PER_BLOCK ||
- P2PHASE(drror->drr_firstobj, DNODES_PER_BLOCK) != 0 ||
- !rwa->raw)
- return (SET_ERROR(EINVAL));
-
- if (drror->drr_firstobj > rwa->max_object)
- rwa->max_object = drror->drr_firstobj;
+ uint64_t last_obj = UINT64_MAX;
+	boolean_t last_obj_exists = B_TRUE;
+ while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
+ err == 0) {
+ switch (range->type) {
+ case DATA:
+ issue_data_read(srta, range);
+ bqueue_enqueue(outq, range, range->sru.data.datablksz);
+ range = get_next_range_nofree(inq, range);
+ break;
+ case HOLE:
+ case OBJECT:
+ case OBJECT_RANGE:
+		case REDACT: /* Redacted blocks must exist */
+ bqueue_enqueue(outq, range, sizeof (*range));
+ range = get_next_range_nofree(inq, range);
+ break;
+ case PREVIOUSLY_REDACTED: {
+ /*
+ * This entry came from the "from bookmark" when
+ * sending from a bookmark that has a redaction
+ * list. We need to check if this object/blkid
+ * exists in the target ("to") dataset, and if
+ * not then we drop this entry. We also need
+ * to fill in the block pointer so that we know
+ * what to prefetch.
+ *
+ * To accomplish the above, we first cache whether or
+ * not the last object we examined exists. If it
+ * doesn't, we can drop this record. If it does, we hold
+ * the dnode and use it to call dbuf_dnode_findbp. We do
+ * this instead of dbuf_bookmark_findbp because we will
+ * often operate on large ranges, and holding the dnode
+ * once is more efficient.
+ */
+ boolean_t object_exists = B_TRUE;
+ /*
+ * If the data is redacted, we only care if it exists,
+ * so that we don't send records for objects that have
+ * been deleted.
+ */
+ dnode_t *dn;
+ if (range->object == last_obj && !last_obj_exists) {
+ /*
+ * If we're still examining the same object as
+ * previously, and it doesn't exist, we don't
+ * need to call dbuf_bookmark_findbp.
+ */
+ object_exists = B_FALSE;
+ } else {
+ err = dnode_hold(os, range->object, FTAG, &dn);
+ if (err == ENOENT) {
+ object_exists = B_FALSE;
+ err = 0;
+ }
+ last_obj = range->object;
+ last_obj_exists = object_exists;
+ }
- /*
- * The DRR_OBJECT_RANGE handling must be deferred to receive_object()
- * so that the block of dnodes is not written out when it's empty,
- * and converted to a HOLE BP.
- */
- rwa->or_crypt_params_present = B_TRUE;
- rwa->or_firstobj = drror->drr_firstobj;
- rwa->or_numslots = drror->drr_numslots;
- bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
- bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
- bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
- rwa->or_byteorder = byteorder;
+ if (err != 0) {
+ break;
+ } else if (!object_exists) {
+ /*
+ * The block was modified, but doesn't
+ * exist in the to dataset; if it was
+ * deleted in the to dataset, then we'll
+ * visit the hole bp for it at some point.
+ */
+ range = get_next_range(inq, range);
+ continue;
+ }
+ uint64_t file_max =
+ MIN(dn->dn_maxblkid, range->end_blkid);
+ /*
+ * The object exists, so we need to try to find the
+ * blkptr for each block in the range we're processing.
+ */
+ rw_enter(&dn->dn_struct_rwlock, RW_READER);
+ for (uint64_t blkid = range->start_blkid;
+ blkid < file_max; blkid++) {
+ blkptr_t bp;
+ uint32_t datablksz =
+ dn->dn_phys->dn_datablkszsec <<
+ SPA_MINBLOCKSHIFT;
+ uint64_t offset = blkid * datablksz;
+ /*
+ * This call finds the next non-hole block in
+ * the object. This is to prevent a
+ * performance problem where we're unredacting
+ * a large hole. Using dnode_next_offset to
+ * skip over the large hole avoids iterating
+ * over every block in it.
+ */
+ err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
+ &offset, 1, 1, 0);
+ if (err == ESRCH) {
+ offset = UINT64_MAX;
+ err = 0;
+ } else if (err != 0) {
+ break;
+ }
+ if (offset != blkid * datablksz) {
+ /*
+				 * There is a hole from here (blkid) to
+				 * offset; enqueue a single HOLE range
+				 * covering it.
+ */
+ offset = MIN(offset, file_max *
+ datablksz);
+ uint64_t nblks = (offset / datablksz) -
+ blkid;
+ enqueue_range(srta, outq, dn, blkid,
+ nblks, NULL, datablksz);
+ blkid += nblks;
+ }
+ if (blkid >= file_max)
+ break;
+ err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
+ NULL, NULL);
+ if (err != 0)
+ break;
+ ASSERT(!BP_IS_HOLE(&bp));
+ enqueue_range(srta, outq, dn, blkid, 1, &bp,
+ datablksz);
+ }
+ rw_exit(&dn->dn_struct_rwlock);
+ dnode_rele(dn, FTAG);
+ range = get_next_range(inq, range);
+ }
+ }
+ }
+ if (srta->cancel || err != 0) {
+ smta->cancel = B_TRUE;
+ srta->error = err;
+ } else if (smta->error != 0) {
+ srta->error = smta->error;
+ }
+ while (!range->eos_marker)
+ range = get_next_range(inq, range);
- return (0);
+ bqueue_enqueue_flush(outq, range, 1);
+ spl_fstrans_unmark(cookie);
+ thread_exit();
}
-/* used to destroy the drc_ds on error */
-static void
-dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
-{
- dsl_dataset_t *ds = drc->drc_ds;
- ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
-
- /*
- * Wait for the txg sync before cleaning up the receive. For
- * resumable receives, this ensures that our resume state has
- * been written out to disk. For raw receives, this ensures
- * that the user accounting code will not attempt to do anything
- * after we stopped receiving the dataset.
- */
- txg_wait_synced(ds->ds_dir->dd_pool, 0);
- ds->ds_objset->os_raw_receive = B_FALSE;
-
- rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
- if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
- rrw_exit(&ds->ds_bp_rwlock, FTAG);
- dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
- } else {
- char name[ZFS_MAX_DATASET_NAME_LEN];
- rrw_exit(&ds->ds_bp_rwlock, FTAG);
- dsl_dataset_name(ds, name);
- dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
- (void) dsl_destroy_head(name);
- }
-}
+#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
-static void
-receive_cksum(struct receive_arg *ra, int len, void *buf)
-{
- if (ra->byteswap) {
- (void) fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
- } else {
- (void) fletcher_4_incremental_native(buf, len, &ra->cksum);
- }
-}
+struct dmu_send_params {
+ /* Pool args */
+	/* Tag dp was held with; will be used to release dp. */
+	const void *tag;
+ dsl_pool_t *dp;
+ /* To snapshot args */
+ const char *tosnap;
+ dsl_dataset_t *to_ds;
+ /* From snapshot args */
+ zfs_bookmark_phys_t ancestor_zb;
+ uint64_t *fromredactsnaps;
+ /* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
+ uint64_t numfromredactsnaps;
+ /* Stream params */
+ boolean_t is_clone;
+ boolean_t embedok;
+ boolean_t large_block_ok;
+ boolean_t compressok;
+ boolean_t rawok;
+ boolean_t savedok;
+ uint64_t resumeobj;
+ uint64_t resumeoff;
+ uint64_t saved_guid;
+ zfs_bookmark_phys_t *redactbook;
+ /* Stream output params */
+ dmu_send_outparams_t *dso;
+
+ /* Stream progress params */
+ offset_t *off;
+ int outfd;
+ char saved_toname[MAXNAMELEN];
+};
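+
+/*
+ * A minimal sketch of how a caller might fill this in for a plain,
+ * non-resuming full send; the real setup is done elsewhere in this file
+ * by the dmu_send()/dmu_send_obj() entry points, which also fill in
+ * ancestor_zb for incremental sends:
+ *
+ *	struct dmu_send_params dspp = {
+ *		.tag = FTAG,
+ *		.dp = dp,
+ *		.tosnap = tosnap,
+ *		.to_ds = to_ds,
+ *		.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED,
+ *		.dso = &dso,
+ *		.off = off,
+ *		.outfd = outfd,
+ *	};
+ */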
-/*
- * Read the payload into a buffer of size len, and update the current record's
- * payload field.
- * Allocate ra->next_rrd and read the next record's header into
- * ra->next_rrd->header.
- * Verify checksum of payload and next record.
- */
static int
-receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
+setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
+ uint64_t *featureflags)
{
- int err;
- zio_cksum_t cksum_orig;
- zio_cksum_t *cksump;
+ dsl_dataset_t *to_ds = dspp->to_ds;
+ dsl_pool_t *dp = dspp->dp;
- if (len != 0) {
- ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
- err = receive_read(ra, len, buf);
- if (err != 0)
- return (err);
- receive_cksum(ra, len, buf);
+ if (dmu_objset_type(os) == DMU_OST_ZFS) {
+ uint64_t version;
+ if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
+ return (SET_ERROR(EINVAL));
- /* note: rrd is NULL when reading the begin record's payload */
- if (ra->rrd != NULL) {
- ra->rrd->payload = buf;
- ra->rrd->payload_size = len;
- ra->rrd->bytes_read = ra->bytes_read;
- }
+ if (version >= ZPL_VERSION_SA)
+ *featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
- ra->prev_cksum = ra->cksum;
-
- ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
- err = receive_read(ra, sizeof (ra->next_rrd->header),
- &ra->next_rrd->header);
- ra->next_rrd->bytes_read = ra->bytes_read;
+ /* raw sends imply large_block_ok */
+ if ((dspp->rawok || dspp->large_block_ok) &&
+ dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
+ *featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
+ }
- if (err != 0) {
- kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
- ra->next_rrd = NULL;
- return (err);
+ /* encrypted datasets will not have embedded blocks */
+ if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
+ spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
+ *featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
- if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
- kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
- ra->next_rrd = NULL;
- return (SET_ERROR(EINVAL));
+
+ /* raw send implies compressok */
+ if (dspp->compressok || dspp->rawok)
+ *featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
+
+ if (dspp->rawok && os->os_encrypted)
+ *featureflags |= DMU_BACKUP_FEATURE_RAW;
+
+ if ((*featureflags &
+ (DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
+ DMU_BACKUP_FEATURE_RAW)) != 0 &&
+ spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
+ *featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
- * Note: checksum is of everything up to but not including the
- * checksum itself.
+ * We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
+ * allow sending ZSTD compressed datasets to a receiver that does not
+	 * support ZSTD.
*/
- ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
- ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
- receive_cksum(ra,
- offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
- &ra->next_rrd->header);
+ if ((*featureflags &
+ (DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
+ dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
+ *featureflags |= DMU_BACKUP_FEATURE_ZSTD;
+ }
- cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
- cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
+ if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
+ *featureflags |= DMU_BACKUP_FEATURE_RESUMING;
+ }
- if (ra->byteswap)
- byteswap_record(&ra->next_rrd->header);
+ if (dspp->redactbook != NULL) {
+ *featureflags |= DMU_BACKUP_FEATURE_REDACTED;
+ }
- if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
- !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
- kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
- ra->next_rrd = NULL;
- return (SET_ERROR(ECKSUM));
+ if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
+ *featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
+ return (0);
+}
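+
+/*
+ * For example, assuming the relevant pool and dataset features are
+ * active, a raw send of an encrypted dataset using large blocks leaves
+ * the above with at least DMU_BACKUP_FEATURE_LARGE_BLOCKS,
+ * DMU_BACKUP_FEATURE_COMPRESSED, DMU_BACKUP_FEATURE_RAW and
+ * DMU_BACKUP_FEATURE_LZ4 set, but never DMU_BACKUP_FEATURE_EMBED_DATA,
+ * since encrypted datasets do not carry embedded blocks.
+ */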
+
+static dmu_replay_record_t *
+create_begin_record(struct dmu_send_params *dspp, objset_t *os,
+ uint64_t featureflags)
+{
+ dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
+ KM_SLEEP);
+ drr->drr_type = DRR_BEGIN;
- receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
+ struct drr_begin *drrb = &drr->drr_u.drr_begin;
+ dsl_dataset_t *to_ds = dspp->to_ds;
- return (0);
+ drrb->drr_magic = DMU_BACKUP_MAGIC;
+ drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
+ drrb->drr_type = dmu_objset_type(os);
+ drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
+ drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
+
+ DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
+ DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
+
+ if (dspp->is_clone)
+ drrb->drr_flags |= DRR_FLAG_CLONE;
+ if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
+ drrb->drr_flags |= DRR_FLAG_CI_DATA;
+ if (zfs_send_set_freerecords_bit)
+ drrb->drr_flags |= DRR_FLAG_FREERECORDS;
+	drrb->drr_flags |= DRR_FLAG_SPILL_BLOCK;
+
+ if (dspp->savedok) {
+ drrb->drr_toguid = dspp->saved_guid;
+ strlcpy(drrb->drr_toname, dspp->saved_toname,
+ sizeof (drrb->drr_toname));
+ } else {
+ dsl_dataset_name(to_ds, drrb->drr_toname);
+ if (!to_ds->ds_is_snapshot) {
+ (void) strlcat(drrb->drr_toname, "@--head--",
+ sizeof (drrb->drr_toname));
+ }
+ }
+ return (drr);
}
static void
-objlist_create(struct objlist *list)
+setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
+ dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
- list_create(&list->list, sizeof (struct receive_objnode),
- offsetof(struct receive_objnode, node));
- list->last_lookup = 0;
+ VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
+ MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
+ offsetof(struct send_range, ln)));
+ to_arg->error_code = 0;
+ to_arg->cancel = B_FALSE;
+ to_arg->os = to_os;
+ to_arg->fromtxg = fromtxg;
+ to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
+ if (rawok)
+ to_arg->flags |= TRAVERSE_NO_DECRYPT;
+ if (zfs_send_corrupt_data)
+ to_arg->flags |= TRAVERSE_HARD;
+ to_arg->num_blocks_visited = &dssp->dss_blocks;
+ (void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
+ curproc, TS_RUN, minclsyspri);
}
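+
+/*
+ * Note on the bqueue_init() calls in these setup functions: the queues
+ * are capped by the sum of the sizes passed to bqueue_enqueue() (bytes),
+ * not by entry count, and the fill fraction controls how often a blocked
+ * thread is cv_signaled. Both are governed by the
+ * zfs_send_no_prefetch_queue_* tunables (zfs_send_queue_* for the reader
+ * thread's output queue).
+ */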
static void
-objlist_destroy(struct objlist *list)
+setup_from_thread(struct redact_list_thread_arg *from_arg,
+ redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
- for (struct receive_objnode *n = list_remove_head(&list->list);
- n != NULL; n = list_remove_head(&list->list)) {
- kmem_free(n, sizeof (*n));
- }
- list_destroy(&list->list);
+ VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
+ MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
+ offsetof(struct send_range, ln)));
+ from_arg->error_code = 0;
+ from_arg->cancel = B_FALSE;
+ from_arg->rl = from_rl;
+ from_arg->mark_redact = B_FALSE;
+ from_arg->num_blocks_visited = &dssp->dss_blocks;
+ /*
+	 * If from_rl is null, redact_list_thread just returns success and
+	 * enqueues an eos marker.
+ */
+ (void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
+ curproc, TS_RUN, minclsyspri);
}
-/*
- * This function looks through the objlist to see if the specified object number
- * is contained in the objlist. In the process, it will remove all object
- * numbers in the list that are smaller than the specified object number. Thus,
- * any lookup of an object number smaller than a previously looked up object
- * number will always return false; therefore, all lookups should be done in
- * ascending order.
- */
-static boolean_t
-objlist_exists(struct objlist *list, uint64_t object)
+static void
+setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
+ struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
- struct receive_objnode *node = list_head(&list->list);
- ASSERT3U(object, >=, list->last_lookup);
- list->last_lookup = object;
- while (node != NULL && node->object < object) {
- VERIFY3P(node, ==, list_remove_head(&list->list));
- kmem_free(node, sizeof (*node));
- node = list_head(&list->list);
- }
- return (node != NULL && node->object == object);
+ if (dspp->redactbook == NULL)
+ return;
+
+ rlt_arg->cancel = B_FALSE;
+ VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
+ MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
+ offsetof(struct send_range, ln)));
+ rlt_arg->error_code = 0;
+ rlt_arg->mark_redact = B_TRUE;
+ rlt_arg->rl = rl;
+ rlt_arg->num_blocks_visited = &dssp->dss_blocks;
+
+ (void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
+ curproc, TS_RUN, minclsyspri);
}
-/*
- * The objlist is a list of object numbers stored in ascending order. However,
- * the insertion of new object numbers does not seek out the correct location to
- * store a new object number; instead, it appends it to the list for simplicity.
- * Thus, any users must take care to only insert new object numbers in ascending
- * order.
- */
static void
-objlist_insert(struct objlist *list, uint64_t object)
+setup_merge_thread(struct send_merge_thread_arg *smt_arg,
+ struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
+ struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
+ objset_t *os)
{
- struct receive_objnode *node = kmem_zalloc(sizeof (*node), KM_SLEEP);
- node->object = object;
-#ifdef ZFS_DEBUG
- {
- struct receive_objnode *last_object = list_tail(&list->list);
- uint64_t last_objnum = (last_object != NULL ? last_object->object : 0);
- ASSERT3U(node->object, >, last_objnum);
- }
-#endif
- list_insert_tail(&list->list, node);
+ VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
+ MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
+ offsetof(struct send_range, ln)));
+ smt_arg->cancel = B_FALSE;
+ smt_arg->error = 0;
+ smt_arg->from_arg = from_arg;
+ smt_arg->to_arg = to_arg;
+ if (dspp->redactbook != NULL)
+ smt_arg->redact_arg = rlt_arg;
+
+ smt_arg->os = os;
+ (void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
+ TS_RUN, minclsyspri);
}
-/*
- * Issue the prefetch reads for any necessary indirect blocks.
- *
- * We use the object ignore list to tell us whether or not to issue prefetches
- * for a given object. We do this for both correctness (in case the blocksize
- * of an object has changed) and performance (if the object doesn't exist, don't
- * needlessly try to issue prefetches). We also trim the list as we go through
- * the stream to prevent it from growing to an unbounded size.
- *
- * The object numbers within will always be in sorted order, and any write
- * records we see will also be in sorted order, but they're not sorted with
- * respect to each other (i.e. we can get several object records before
- * receiving each object's write records). As a result, once we've reached a
- * given object number, we can safely remove any reference to lower object
- * numbers in the ignore list. In practice, we receive up to 32 object records
- * before receiving write records, so the list can have up to 32 nodes in it.
- */
-/* ARGSUSED */
static void
-receive_read_prefetch(struct receive_arg *ra,
- uint64_t object, uint64_t offset, uint64_t length)
+setup_reader_thread(struct send_reader_thread_arg *srt_arg,
+ struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
+ uint64_t featureflags)
{
- if (!objlist_exists(&ra->ignore_objlist, object)) {
- dmu_prefetch(ra->os, object, 1, offset, length,
- ZIO_PRIORITY_SYNC_READ);
- }
+ VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
+ MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
+ offsetof(struct send_range, ln)));
+ srt_arg->smta = smt_arg;
+ srt_arg->issue_reads = !dspp->dso->dso_dryrun;
+ srt_arg->featureflags = featureflags;
+ (void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
+ curproc, TS_RUN, minclsyspri);
}
-/*
- * Read records off the stream, issuing any necessary prefetches.
- */
static int
-receive_read_record(struct receive_arg *ra)
+setup_resume_points(struct dmu_send_params *dspp,
+ struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
+ struct redact_list_thread_arg *rlt_arg,
+ struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
+ redaction_list_t *redact_rl, nvlist_t *nvl)
{
- int err;
-
- switch (ra->rrd->header.drr_type) {
- case DRR_OBJECT:
- {
- struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
- uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
- void *buf = kmem_zalloc(size, KM_SLEEP);
- dmu_object_info_t doi;
+ (void) smt_arg;
+ dsl_dataset_t *to_ds = dspp->to_ds;
+ int err = 0;
- err = receive_read_payload_and_next_header(ra, size, buf);
- if (err != 0) {
- kmem_free(buf, size);
+ uint64_t obj = 0;
+ uint64_t blkid = 0;
+ if (resuming) {
+ obj = dspp->resumeobj;
+ dmu_object_info_t to_doi;
+ err = dmu_object_info(os, obj, &to_doi);
+ if (err != 0)
return (err);
- }
- err = dmu_object_info(ra->os, drro->drr_object, &doi);
- /*
- * See receive_read_prefetch for an explanation why we're
- * storing this object in the ignore_obj_list.
- */
- if (err == ENOENT || err == EEXIST ||
- (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
- objlist_insert(&ra->ignore_objlist, drro->drr_object);
- err = 0;
- }
- return (err);
- }
- case DRR_FREEOBJECTS:
- {
- err = receive_read_payload_and_next_header(ra, 0, NULL);
- return (err);
- }
- case DRR_WRITE:
- {
- struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
- arc_buf_t *abuf;
- boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
-
- if (ra->raw) {
- boolean_t byteorder = ZFS_HOST_BYTEORDER ^
- !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
- ra->byteswap;
-
- abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
- drrw->drr_object, byteorder, drrw->drr_salt,
- drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
- drrw->drr_compressed_size, drrw->drr_logical_size,
- drrw->drr_compressiontype);
- } else if (DRR_WRITE_COMPRESSED(drrw)) {
- ASSERT3U(drrw->drr_compressed_size, >, 0);
- ASSERT3U(drrw->drr_logical_size, >=,
- drrw->drr_compressed_size);
- ASSERT(!is_meta);
- abuf = arc_loan_compressed_buf(
- dmu_objset_spa(ra->os),
- drrw->drr_compressed_size, drrw->drr_logical_size,
- drrw->drr_compressiontype);
- } else {
- abuf = arc_loan_buf(dmu_objset_spa(ra->os),
- is_meta, drrw->drr_logical_size);
- }
- err = receive_read_payload_and_next_header(ra,
- DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
- if (err != 0) {
- dmu_return_arcbuf(abuf);
- return (err);
- }
- ra->rrd->arc_buf = abuf;
- receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
- drrw->drr_logical_size);
- return (err);
+ blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref *drrwb =
- &ra->rrd->header.drr_u.drr_write_byref;
- err = receive_read_payload_and_next_header(ra, 0, NULL);
- receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
- drrwb->drr_length);
- return (err);
+ /*
+ * If we're resuming a redacted send, we can skip to the appropriate
+ * point in the redaction bookmark by binary searching through it.
+ */
+ if (redact_rl != NULL) {
+ SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
- case DRR_WRITE_EMBEDDED:
- {
- struct drr_write_embedded *drrwe =
- &ra->rrd->header.drr_u.drr_write_embedded;
- uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
- void *buf = kmem_zalloc(size, KM_SLEEP);
-
- err = receive_read_payload_and_next_header(ra, size, buf);
- if (err != 0) {
- kmem_free(buf, size);
- return (err);
- }
- receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
- drrwe->drr_length);
- return (err);
- }
- case DRR_FREE:
- {
+ SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
+ if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
+ uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
- * It might be beneficial to prefetch indirect blocks here, but
- * we don't really have the data to decide for sure.
+ * Note: If the resume point is in an object whose
+ * blocksize is different in the from vs to snapshots,
+ * we will have divided by the "wrong" blocksize.
+ * However, in this case fromsnap's send_cb() will
+ * detect that the blocksize has changed and therefore
+ * ignore this object.
+ *
+ * If we're resuming a send from a redaction bookmark,
+ * we still cannot accidentally suggest blocks behind
+ * the to_ds. In addition, we know that any blocks in
+ * the object in the to_ds will have to be sent, since
+ * the size changed. Therefore, we can't cause any harm
+ * this way either.
*/
- err = receive_read_payload_and_next_header(ra, 0, NULL);
- return (err);
- }
- case DRR_END:
- {
- struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
- if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
- return (SET_ERROR(ECKSUM));
- return (0);
- }
- case DRR_SPILL:
- {
- struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
- arc_buf_t *abuf;
- int len = DRR_SPILL_PAYLOAD_SIZE(drrs);
-
- /* DRR_SPILL records are either raw or uncompressed */
- if (ra->raw) {
- boolean_t byteorder = ZFS_HOST_BYTEORDER ^
- !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
- ra->byteswap;
-
- abuf = arc_loan_raw_buf(dmu_objset_spa(ra->os),
- dmu_objset_id(ra->os), byteorder, drrs->drr_salt,
- drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
- drrs->drr_compressed_size, drrs->drr_length,
- drrs->drr_compressiontype);
- } else {
- abuf = arc_loan_buf(dmu_objset_spa(ra->os),
- DMU_OT_IS_METADATA(drrs->drr_type),
- drrs->drr_length);
- }
-
- err = receive_read_payload_and_next_header(ra, len,
- abuf->b_data);
- if (err != 0) {
- dmu_return_arcbuf(abuf);
- return (err);
- }
- ra->rrd->arc_buf = abuf;
- return (err);
+ SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
- case DRR_OBJECT_RANGE:
- {
- err = receive_read_payload_and_next_header(ra, 0, NULL);
- return (err);
- }
- default:
- return (SET_ERROR(EINVAL));
+ if (resuming) {
+ fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
+ fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
+ return (0);
}
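+
+/*
+ * For example, resuming at resumeoff 20 MiB into an object with a
+ * 128 KiB data block size yields blkid 160; the traversal threads are
+ * then told to resume at (resumeobj, 160) rather than at the start of
+ * the dataset.
+ */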
-static void
-dprintf_drr(struct receive_record_arg *rrd, int err)
+static dmu_sendstatus_t *
+setup_send_progress(struct dmu_send_params *dspp)
{
-#ifdef ZFS_DEBUG
- switch (rrd->header.drr_type) {
- case DRR_OBJECT:
- {
- struct drr_object *drro = &rrd->header.drr_u.drr_object;
- dprintf("drr_type = OBJECT obj = %llu type = %u "
- "bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
- "compress = %u dn_slots = %u err = %d\n",
- drro->drr_object, drro->drr_type, drro->drr_bonustype,
- drro->drr_blksz, drro->drr_bonuslen,
- drro->drr_checksumtype, drro->drr_compress,
- drro->drr_dn_slots, err);
- break;
- }
- case DRR_FREEOBJECTS:
- {
- struct drr_freeobjects *drrfo =
- &rrd->header.drr_u.drr_freeobjects;
- dprintf("drr_type = FREEOBJECTS firstobj = %llu "
- "numobjs = %llu err = %d\n",
- drrfo->drr_firstobj, drrfo->drr_numobjs, err);
- break;
- }
- case DRR_WRITE:
- {
- struct drr_write *drrw = &rrd->header.drr_u.drr_write;
- dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
- "lsize = %llu cksumtype = %u cksumflags = %u "
- "compress = %u psize = %llu err = %d\n",
- drrw->drr_object, drrw->drr_type, drrw->drr_offset,
- drrw->drr_logical_size, drrw->drr_checksumtype,
- drrw->drr_flags, drrw->drr_compressiontype,
- drrw->drr_compressed_size, err);
- break;
- }
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref *drrwbr =
- &rrd->header.drr_u.drr_write_byref;
- dprintf("drr_type = WRITE_BYREF obj = %llu offset = %llu "
- "length = %llu toguid = %llx refguid = %llx "
- "refobject = %llu refoffset = %llu cksumtype = %u "
- "cksumflags = %u err = %d\n",
- drrwbr->drr_object, drrwbr->drr_offset,
- drrwbr->drr_length, drrwbr->drr_toguid,
- drrwbr->drr_refguid, drrwbr->drr_refobject,
- drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
- drrwbr->drr_flags, err);
- break;
- }
- case DRR_WRITE_EMBEDDED:
- {
- struct drr_write_embedded *drrwe =
- &rrd->header.drr_u.drr_write_embedded;
- dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
- "length = %llu compress = %u etype = %u lsize = %u "
- "psize = %u err = %d\n",
- drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
- drrwe->drr_compression, drrwe->drr_etype,
- drrwe->drr_lsize, drrwe->drr_psize, err);
- break;
- }
- case DRR_FREE:
- {
- struct drr_free *drrf = &rrd->header.drr_u.drr_free;
- dprintf("drr_type = FREE obj = %llu offset = %llu "
- "length = %lld err = %d\n",
- drrf->drr_object, drrf->drr_offset, drrf->drr_length,
- err);
- break;
- }
- case DRR_SPILL:
- {
- struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
- dprintf("drr_type = SPILL obj = %llu length = %llu "
- "err = %d\n", drrs->drr_object, drrs->drr_length, err);
- break;
- }
- default:
- return;
- }
-#endif
+ dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
+ dssp->dss_outfd = dspp->outfd;
+ dssp->dss_off = dspp->off;
+ dssp->dss_proc = curproc;
+ mutex_enter(&dspp->to_ds->ds_sendstream_lock);
+ list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
+ mutex_exit(&dspp->to_ds->ds_sendstream_lock);
+ return (dssp);
}
/*
- * Commit the records to the pool.
+ * Actually do the bulk of the work in a zfs send.
+ *
+ * The idea is that we want to do a send from ancestor_zb to to_ds. We also
+ * want to not send any data that has been modified by all the datasets in
+ * redactsnaparr, and store the list of blocks that are redacted in this way in
+ * a bookmark named redactbook, created on the to_ds. We do this by creating
+ * several worker threads, whose function is described below.
+ *
+ * There are three cases.
+ * The first case is a redacted zfs send. In this case there are 5 threads.
+ * The first thread is the to_ds traversal thread: it calls traverse_dataset on
+ * the to_ds and finds all the blocks that have changed since ancestor_zb (if
+ * it's a full send, that's all blocks in the dataset). It then sends those
+ * blocks on to the send merge thread. The redact list thread takes the data
+ * from the redaction bookmark and sends those blocks on to the send merge
+ * thread. The send merge thread takes the data from the to_ds traversal
+ * thread, and combines it with the redaction records from the redact list
+ * thread. If a block appears in both the to_ds's data and the redaction data,
+ * the send merge thread will mark it as redacted and send it on to the prefetch
+ * thread. Otherwise, the send merge thread will send the block on to the
+ * prefetch thread unchanged. The prefetch thread will issue prefetch reads for
+ * any data that isn't redacted, and then send the data on to the main thread.
+ * The main thread behaves the same as in a normal send case, issuing demand
+ * reads for data blocks and sending out records over the network.
+ *
+ * The graphic below diagrams the flow of data in the case of a redacted zfs
+ * send. Each box represents a thread, and each line represents the flow of
+ * data.
+ *
+ * Records from the |
+ * redaction bookmark |
+ * +--------------------+ | +---------------------------+
+ * | | v | Send Merge Thread |
+ * | Redact List Thread +----------> Apply redaction marks to |
+ * | | | records as specified by |
+ * +--------------------+ | redaction ranges |
+ * +----^---------------+------+
+ * | | Merged data
+ * | |
+ * | +------------v--------+
+ * | | Prefetch Thread |
+ * +--------------------+ | | Issues prefetch |
+ * | to_ds Traversal | | | reads of data blocks|
+ * | Thread (finds +---------------+ +------------+--------+
+ * | candidate blocks) | Blocks modified | Prefetched data
+ * +--------------------+ by to_ds since |
+ * ancestor_zb +------------v----+
+ * | Main Thread | File Descriptor
+ * | Sends data over +->(to zfs receive)
+ * | wire |
+ * +-----------------+
+ *
+ * The second case is an incremental send from a redaction bookmark. The to_ds
+ * traversal thread and the main thread behave the same as in the redacted
+ * send case. The new thread is the from bookmark traversal thread. It
+ * iterates over the redaction list in the redaction bookmark, and enqueues
+ * records for each block that was redacted in the original send. The send
+ * merge thread now has to merge the data from the two threads. For details
+ * about that process, see the header comment of send_merge_thread(). Any data
+ * it decides to send on will be prefetched by the prefetch thread. Note that
+ * you can perform a redacted send from a redaction bookmark; in that case,
+ * the data flow behaves very similarly to the flow in the redacted send case,
+ * except with the addition of the bookmark traversal thread iterating over the
+ * redaction bookmark. The send_merge_thread also has to take on the
+ * responsibility of merging the redact list thread's records, the bookmark
+ * traversal thread's records, and the to_ds records.
+ *
+ * +---------------------+
+ * | |
+ * | Redact List Thread +--------------+
+ * | | |
+ * +---------------------+ |
+ * Blocks in redaction list | Ranges modified by every secure snap
+ * of from bookmark         |     (or EOS if not redacted)
+ * |
+ * +---------------------+ | +----v----------------------+
+ * | bookmark Traversal | v | Send Merge Thread |
+ * | Thread (finds +---------> Merges bookmark, rlt, and |
+ * | candidate blocks) | | to_ds send records |
+ * +---------------------+ +----^---------------+------+
+ * | | Merged data
+ * | +------------v--------+
+ * | | Prefetch Thread |
+ * +--------------------+ | | Issues prefetch |
+ * | to_ds Traversal | | | reads of data blocks|
+ * | Thread (finds +---------------+ +------------+--------+
+ * | candidate blocks) | Blocks modified | Prefetched data
+ * +--------------------+ by to_ds since +------------v----+
+ * ancestor_zb | Main Thread | File Descriptor
+ * | Sends data over +->(to zfs receive)
+ * | wire |
+ * +-----------------+
+ *
+ * The final case is a simple zfs full or incremental send. The to_ds traversal
+ * thread behaves the same as always. The redact list thread is never started.
+ * The send merge thread takes all the blocks that the to_ds traversal thread
+ * sends it, prefetches the data, and sends the blocks on to the main thread.
+ * The main thread sends the data over the wire.
+ *
+ * To keep performance acceptable, we want to prefetch the data in the worker
+ * threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
+ * feature built into traverse_dataset, the combining and deletion of records
+ * due to redaction and sends from redaction bookmarks mean that we could
+ * issue many unnecessary prefetches. As a result, we only prefetch data
+ * after we've determined that the record is not going to be redacted. To
+ * prevent the prefetching from getting too far ahead of the main thread, the
+ * blocking queues that are used for communication are capped not by the
+ * number of entries in the queue, but by the sum of the size of the
+ * prefetches associated with them. The limit on the amount of data that the
+ * thread can prefetch beyond what the main thread has reached is controlled
+ * by the global variable zfs_send_queue_length. In addition, to prevent poor
+ * performance in the beginning of a send, we also limit the distance ahead
+ * that the traversal threads can be. That distance is controlled by the
+ * zfs_send_no_prefetch_queue_length tunable.
+ *
+ * Note: Releases dp using the specified tag.
*/
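+
+/*
+ * All of the worker threads above share the same blocking-queue loop
+ * shape, sketched here with error and cancel handling elided:
+ *
+ *	for (range = bqueue_dequeue(inq); !range->eos_marker;
+ *	    range = get_next_range_nofree(inq, range))
+ *		bqueue_enqueue(outq, range, sizeof (*range));
+ *	bqueue_enqueue_flush(outq, range, 1);
+ *
+ * where each thread does its own per-range work before passing the range
+ * (and, finally, the EOS marker) downstream.
+ */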
static int
-receive_process_record(struct receive_writer_arg *rwa,
- struct receive_record_arg *rrd)
+dmu_send_impl(struct dmu_send_params *dspp)
{
+ objset_t *os;
+ dmu_replay_record_t *drr;
+ dmu_sendstatus_t *dssp;
+ dmu_send_cookie_t dsc = {0};
int err;
+ uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
+ uint64_t featureflags = 0;
+ struct redact_list_thread_arg *from_arg;
+ struct send_thread_arg *to_arg;
+ struct redact_list_thread_arg *rlt_arg;
+ struct send_merge_thread_arg *smt_arg;
+ struct send_reader_thread_arg *srt_arg;
+ struct send_range *range;
+ redaction_list_t *from_rl = NULL;
+ redaction_list_t *redact_rl = NULL;
+ boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
+ boolean_t book_resuming = resuming;
+
+ dsl_dataset_t *to_ds = dspp->to_ds;
+ zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
+ dsl_pool_t *dp = dspp->dp;
+ const void *tag = dspp->tag;
- /* Processing in order, therefore bytes_read should be increasing. */
- ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
- rwa->bytes_read = rrd->bytes_read;
-
- switch (rrd->header.drr_type) {
- case DRR_OBJECT:
- {
- struct drr_object *drro = &rrd->header.drr_u.drr_object;
- err = receive_object(rwa, drro, rrd->payload);
- kmem_free(rrd->payload, rrd->payload_size);
- rrd->payload = NULL;
- break;
- }
- case DRR_FREEOBJECTS:
- {
- struct drr_freeobjects *drrfo =
- &rrd->header.drr_u.drr_freeobjects;
- err = receive_freeobjects(rwa, drrfo);
- break;
- }
- case DRR_WRITE:
- {
- struct drr_write *drrw = &rrd->header.drr_u.drr_write;
- err = receive_write(rwa, drrw, rrd->arc_buf);
- /* if receive_write() is successful, it consumes the arc_buf */
- if (err != 0)
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
- rrd->payload = NULL;
- break;
- }
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref *drrwbr =
- &rrd->header.drr_u.drr_write_byref;
- err = receive_write_byref(rwa, drrwbr);
- break;
- }
- case DRR_WRITE_EMBEDDED:
- {
- struct drr_write_embedded *drrwe =
- &rrd->header.drr_u.drr_write_embedded;
- err = receive_write_embedded(rwa, drrwe, rrd->payload);
- kmem_free(rrd->payload, rrd->payload_size);
- rrd->payload = NULL;
- break;
- }
- case DRR_FREE:
- {
- struct drr_free *drrf = &rrd->header.drr_u.drr_free;
- err = receive_free(rwa, drrf);
- break;
- }
- case DRR_SPILL:
- {
- struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
- err = receive_spill(rwa, drrs, rrd->arc_buf);
- /* if receive_spill() is successful, it consumes the arc_buf */
- if (err != 0)
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
- rrd->payload = NULL;
- break;
- }
- case DRR_OBJECT_RANGE:
- {
- struct drr_object_range *drror =
- &rrd->header.drr_u.drr_object_range;
- return (receive_object_range(rwa, drror));
- }
- default:
- return (SET_ERROR(EINVAL));
+ err = dmu_objset_from_ds(to_ds, &os);
+ if (err != 0) {
+ dsl_pool_rele(dp, tag);
+ return (err);
}
- if (err != 0)
- dprintf_drr(rrd, err);
-
- return (err);
-}
-
-/*
- * dmu_recv_stream's worker thread; pull records off the queue, and then call
- * receive_process_record When we're done, signal the main thread and exit.
- */
-static void
-receive_writer_thread(void *arg)
-{
- struct receive_writer_arg *rwa = arg;
- struct receive_record_arg *rrd;
- fstrans_cookie_t cookie = spl_fstrans_mark();
+ /*
+ * If this is a non-raw send of an encrypted ds, we can ensure that
+ * the objset_phys_t is authenticated. This is safe because this is
+ * either a snapshot or we have owned the dataset, ensuring that
+ * it can't be modified.
+ */
+ if (!dspp->rawok && os->os_encrypted &&
+ arc_is_unauthenticated(os->os_phys_buf)) {
+ zbookmark_phys_t zb;
- for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
- rrd = bqueue_dequeue(&rwa->q)) {
- /*
- * If there's an error, the main thread will stop putting things
- * on the queue, but we need to clear everything in it before we
- * can exit.
- */
- if (rwa->err == 0) {
- rwa->err = receive_process_record(rwa, rrd);
- } else if (rrd->arc_buf != NULL) {
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
- rrd->payload = NULL;
- } else if (rrd->payload != NULL) {
- kmem_free(rrd->payload, rrd->payload_size);
- rrd->payload = NULL;
+ SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
+ ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
+ err = arc_untransform(os->os_phys_buf, os->os_spa,
+ &zb, B_FALSE);
+ if (err != 0) {
+ dsl_pool_rele(dp, tag);
+ return (err);
}
- kmem_free(rrd, sizeof (*rrd));
- }
- kmem_free(rrd, sizeof (*rrd));
- mutex_enter(&rwa->mutex);
- rwa->done = B_TRUE;
- cv_signal(&rwa->cv);
- mutex_exit(&rwa->mutex);
- spl_fstrans_unmark(cookie);
- thread_exit();
-}
-static int
-resume_check(struct receive_arg *ra, nvlist_t *begin_nvl)
-{
- uint64_t val;
- objset_t *mos = dmu_objset_pool(ra->os)->dp_meta_objset;
- uint64_t dsobj = dmu_objset_id(ra->os);
- uint64_t resume_obj, resume_off;
-
- if (nvlist_lookup_uint64(begin_nvl,
- "resume_object", &resume_obj) != 0 ||
- nvlist_lookup_uint64(begin_nvl,
- "resume_offset", &resume_off) != 0) {
- return (SET_ERROR(EINVAL));
+ ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
- VERIFY0(zap_lookup(mos, dsobj,
- DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val));
- if (resume_obj != val)
- return (SET_ERROR(EINVAL));
- VERIFY0(zap_lookup(mos, dsobj,
- DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val));
- if (resume_off != val)
- return (SET_ERROR(EINVAL));
-
- return (0);
-}
-/*
- * Read in the stream's records, one by one, and apply them to the pool. There
- * are two threads involved; the thread that calls this function will spin up a
- * worker thread, read the records off the stream one by one, and issue
- * prefetches for any necessary indirect blocks. It will then push the records
- * onto an internal blocking queue. The worker thread will pull the records off
- * the queue, and actually write the data into the DMU. This way, the worker
- * thread doesn't have to wait for reads to complete, since everything it needs
- * (the indirect blocks) will be prefetched.
- *
- * NB: callers *must* call dmu_recv_end() if this succeeds.
- */
-int
-dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
- int cleanup_fd, uint64_t *action_handlep)
-{
- int err = 0;
- struct receive_arg *ra;
- struct receive_writer_arg *rwa;
- int featureflags;
- uint32_t payloadlen;
- void *payload;
- nvlist_t *begin_nvl = NULL;
-
- ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
- rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
-
- ra->byteswap = drc->drc_byteswap;
- ra->raw = drc->drc_raw;
- ra->cksum = drc->drc_cksum;
- ra->vp = vp;
- ra->voff = *voffp;
-
- if (dsl_dataset_is_zapified(drc->drc_ds)) {
- (void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
- drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
- sizeof (ra->bytes_read), 1, &ra->bytes_read);
+ if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
+ dsl_pool_rele(dp, tag);
+ return (err);
}
- objlist_create(&ra->ignore_objlist);
-
- /* these were verified in dmu_recv_begin */
- ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
- DMU_SUBSTREAM);
- ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
-
/*
- * Open the objset we are modifying.
+ * If we're doing a redacted send, hold the bookmark's redaction list.
*/
- VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));
-
- ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
+ if (dspp->redactbook != NULL) {
+ err = dsl_redaction_list_hold_obj(dp,
+ dspp->redactbook->zbm_redaction_obj, FTAG,
+ &redact_rl);
+ if (err != 0) {
+ dsl_pool_rele(dp, tag);
+ return (SET_ERROR(EINVAL));
+ }
+ dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
+ }
- featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
- ra->featureflags = featureflags;
+ /*
+ * If we're sending from a redaction bookmark, hold the redaction list
+ * so that we can consider sending the redacted blocks.
+ */
+ if (ancestor_zb->zbm_redaction_obj != 0) {
+ err = dsl_redaction_list_hold_obj(dp,
+ ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
+ if (err != 0) {
+ if (redact_rl != NULL) {
+ dsl_redaction_list_long_rele(redact_rl, FTAG);
+ dsl_redaction_list_rele(redact_rl, FTAG);
+ }
+ dsl_pool_rele(dp, tag);
+ return (SET_ERROR(EINVAL));
+ }
+ dsl_redaction_list_long_hold(dp, from_rl, FTAG);
+ }
- ASSERT0(ra->os->os_encrypted &&
- (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
+ dsl_dataset_long_hold(to_ds, FTAG);
- /* if this stream is dedup'ed, set up the avl tree for guid mapping */
- if (featureflags & DMU_BACKUP_FEATURE_DEDUP) {
- minor_t minor;
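+ /*
+ * Allocate the per-stage argument blocks for the send pipeline: the
+ * from/to traversal threads, the redact list thread, the merge thread,
+ * and the reader thread.
+ */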
+ from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
+ to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
+ rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
+ smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
+ srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
+
+ drr = create_begin_record(dspp, os, featureflags);
+ dssp = setup_send_progress(dspp);
+
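+ /* Fill in the per-stream state handed to dump_record() and do_dump(). */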
+ dsc.dsc_drr = drr;
+ dsc.dsc_dso = dspp->dso;
+ dsc.dsc_os = os;
+ dsc.dsc_off = dspp->off;
+ dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
+ dsc.dsc_fromtxg = fromtxg;
+ dsc.dsc_pending_op = PENDING_NONE;
+ dsc.dsc_featureflags = featureflags;
+ dsc.dsc_resume_object = dspp->resumeobj;
+ dsc.dsc_resume_offset = dspp->resumeoff;
- if (cleanup_fd == -1) {
- err = SET_ERROR(EBADF);
- goto out;
- }
- err = zfs_onexit_fd_hold(cleanup_fd, &minor);
- if (err != 0) {
- cleanup_fd = -1;
- goto out;
- }
+ dsl_pool_rele(dp, tag);
- if (*action_handlep == 0) {
- rwa->guid_to_ds_map =
- kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
- avl_create(rwa->guid_to_ds_map, guid_compare,
- sizeof (guid_map_entry_t),
- offsetof(guid_map_entry_t, avlnode));
- err = zfs_onexit_add_cb(minor,
- free_guid_map_onexit, rwa->guid_to_ds_map,
- action_handlep);
- if (err != 0)
- goto out;
- } else {
- err = zfs_onexit_cb_data(minor, *action_handlep,
- (void **)&rwa->guid_to_ds_map);
- if (err != 0)
- goto out;
- }
+ void *payload = NULL;
+ size_t payload_len = 0;
+ nvlist_t *nvl = fnvlist_alloc();
- drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
+ /*
+ * If we're doing a redacted send, we include the snapshots we're
+ * redacted with respect to so that the target system knows what send
+ * streams can be correctly received on top of this dataset. If we're
+ * instead sending a redacted dataset, we include the snapshots that the
+ * dataset was created with respect to.
+ */
+ if (dspp->redactbook != NULL) {
+ fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
+ redact_rl->rl_phys->rlp_snaps,
+ redact_rl->rl_phys->rlp_num_snaps);
+ } else if (dsl_dataset_feature_is_active(to_ds,
+ SPA_FEATURE_REDACTED_DATASETS)) {
+ uint64_t *tods_guids;
+ uint64_t length;
+ VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
+ SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
+ fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
+ length);
}
- payloadlen = drc->drc_drr_begin->drr_payloadlen;
- payload = NULL;
- if (payloadlen != 0)
- payload = kmem_alloc(payloadlen, KM_SLEEP);
+ /*
+ * If we're sending from a redaction bookmark, then we should retrieve
+ * the guids of that bookmark so we can send them over the wire.
+ */
+ if (from_rl != NULL) {
+ fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
+ from_rl->rl_phys->rlp_snaps,
+ from_rl->rl_phys->rlp_num_snaps);
+ }
- err = receive_read_payload_and_next_header(ra, payloadlen, payload);
- if (err != 0) {
- if (payloadlen != 0)
- kmem_free(payload, payloadlen);
- goto out;
+ /*
+ * If the snapshot we're sending from is redacted, include the redaction
+ * list in the stream.
+ */
+ if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
+ ASSERT3P(from_rl, ==, NULL);
+ fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
+ dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
+ if (dspp->numfromredactsnaps > 0) {
+ kmem_free(dspp->fromredactsnaps,
+ dspp->numfromredactsnaps * sizeof (uint64_t));
+ dspp->fromredactsnaps = NULL;
+ }
}
- if (payloadlen != 0) {
- err = nvlist_unpack(payload, payloadlen, &begin_nvl, KM_SLEEP);
- kmem_free(payload, payloadlen);
+
+ if (resuming || book_resuming) {
+ err = setup_resume_points(dspp, to_arg, from_arg,
+ rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
- /* handle DSL encryption key payload */
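+ /*
+ * For raw sends, include the dataset's crypto parameters in the
+ * BEGIN record's payload under "crypt_keydata".
+ */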
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
+ uint64_t ivset_guid = ancestor_zb->zbm_ivset_guid;
nvlist_t *keynvl = NULL;
+ ASSERT(os->os_encrypted);
- ASSERT(ra->os->os_encrypted);
- ASSERT(drc->drc_raw);
-
- err = nvlist_lookup_nvlist(begin_nvl, "crypt_keydata", &keynvl);
- if (err != 0)
+ err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
+ &keynvl);
+ if (err != 0) {
+ fnvlist_free(nvl);
goto out;
+ }
- /*
- * If this is a new dataset we set the key immediately.
- * Otherwise we don't want to change the key until we
- * are sure the rest of the receive succeeded so we stash
- * the keynvl away until then.
- */
- err = dsl_crypto_recv_raw(spa_name(ra->os->os_spa),
- drc->drc_ds->ds_object, drc->drc_drrb->drr_type,
- keynvl, drc->drc_newfs);
- if (err != 0)
- goto out;
+ fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
+ fnvlist_free(keynvl);
+ }
- if (!drc->drc_newfs)
- drc->drc_keynvl = fnvlist_dup(keynvl);
+ if (!nvlist_empty(nvl)) {
+ payload = fnvlist_pack(nvl, &payload_len);
+ drr->drr_payloadlen = payload_len;
}
- if (featureflags & DMU_BACKUP_FEATURE_RESUMING) {
- err = resume_check(ra, begin_nvl);
- if (err != 0)
- goto out;
+ fnvlist_free(nvl);
+ err = dump_record(&dsc, payload, payload_len);
+ fnvlist_pack_free(payload, payload_len);
+ if (err != 0) {
+ err = dsc.dsc_err;
+ goto out;
+ }
+
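+ /*
+ * Spin up the pipeline: the to/from traversal threads and the redact
+ * list thread feed the merge thread, and the reader thread issues the
+ * reads for the merged ranges that the main loop below dumps.
+ */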
+ setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
+ setup_from_thread(from_arg, from_rl, dssp);
+ setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
+ setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
+ setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
+
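+ /*
+ * Consume ranges from the reader thread until we see the end-of-stream
+ * marker, an error, or a pending signal.
+ */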
+ range = bqueue_dequeue(&srt_arg->q);
+ while (err == 0 && !range->eos_marker) {
+ err = do_dump(&dsc, range);
+ range = get_next_range(&srt_arg->q, range);
+ if (issig(JUSTLOOKING) && issig(FORREAL))
+ err = SET_ERROR(EINTR);
}
- (void) bqueue_init(&rwa->q,
- MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
- offsetof(struct receive_record_arg, node));
- cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
- mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
- rwa->os = ra->os;
- rwa->byteswap = drc->drc_byteswap;
- rwa->resumable = drc->drc_resumable;
- rwa->raw = drc->drc_raw;
- rwa->os->os_raw_receive = drc->drc_raw;
-
- (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
- TS_RUN, minclsyspri);
/*
- * We're reading rwa->err without locks, which is safe since we are the
- * only reader, and the worker thread is the only writer. It's ok if we
- * miss a write for an iteration or two of the loop, since the writer
- * thread will keep freeing records we send it until we send it an eos
- * marker.
- *
- * We can leave this loop in 3 ways: First, if rwa->err is
- * non-zero. In that case, the writer thread will free the rrd we just
- * pushed. Second, if we're interrupted; in that case, either it's the
- * first loop and ra->rrd was never allocated, or it's later and ra->rrd
- * has been handed off to the writer thread who will free it. Finally,
- * if receive_read_record fails or we're at the end of the stream, then
- * we free ra->rrd and exit.
+ * If we hit an error or are interrupted, cancel our worker threads and
+ * clear the queue of any pending records. The threads will pass the
+ * cancel up the tree of worker threads, and each one will clean up any
+ * pending records before exiting.
*/
- while (rwa->err == 0) {
- if (issig(JUSTLOOKING) && issig(FORREAL)) {
- err = SET_ERROR(EINTR);
- break;
+ if (err != 0) {
+ srt_arg->cancel = B_TRUE;
+ while (!range->eos_marker) {
+ range = get_next_range(&srt_arg->q, range);
}
+ }
+ range_free(range);
- ASSERT3P(ra->rrd, ==, NULL);
- ra->rrd = ra->next_rrd;
- ra->next_rrd = NULL;
- /* Allocates and loads header into ra->next_rrd */
- err = receive_read_record(ra);
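+ /* Every stage has delivered its eos marker; tear down the queues. */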
+ bqueue_destroy(&srt_arg->q);
+ bqueue_destroy(&smt_arg->q);
+ if (dspp->redactbook != NULL)
+ bqueue_destroy(&rlt_arg->q);
+ bqueue_destroy(&to_arg->q);
+ bqueue_destroy(&from_arg->q);
- if (ra->rrd->header.drr_type == DRR_END || err != 0) {
- kmem_free(ra->rrd, sizeof (*ra->rrd));
- ra->rrd = NULL;
- break;
- }
+ if (err == 0 && srt_arg->error != 0)
+ err = srt_arg->error;
- bqueue_enqueue(&rwa->q, ra->rrd,
- sizeof (struct receive_record_arg) + ra->rrd->payload_size);
- ra->rrd = NULL;
- }
- if (ra->next_rrd == NULL)
- ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
- ra->next_rrd->eos_marker = B_TRUE;
- bqueue_enqueue(&rwa->q, ra->next_rrd, 1);
-
- mutex_enter(&rwa->mutex);
- while (!rwa->done) {
- cv_wait(&rwa->cv, &rwa->mutex);
+ if (err != 0)
+ goto out;
+
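+ /* Flush any record (e.g. an aggregated run of frees) still pending. */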
+ if (dsc.dsc_pending_op != PENDING_NONE)
+ if (dump_record(&dsc, NULL, 0) != 0)
+ err = SET_ERROR(EINTR);
+
+ if (err != 0) {
+ if (err == EINTR && dsc.dsc_err != 0)
+ err = dsc.dsc_err;
+ goto out;
}
- mutex_exit(&rwa->mutex);
/*
- * If we are receiving a full stream as a clone, all object IDs which
- * are greater than the maximum ID referenced in the stream are
- * by definition unused and must be freed.
+ * Send the DRR_END record if this is not a saved stream.
+ * Otherwise, the omitted DRR_END record will signal to
+ * the receive side that the stream is incomplete.
*/
- if (drc->drc_clone && drc->drc_drrb->drr_fromguid == 0) {
- uint64_t obj = rwa->max_object + 1;
- int free_err = 0;
- int next_err = 0;
-
- while (next_err == 0) {
- free_err = dmu_free_long_object(rwa->os, obj);
- if (free_err != 0 && free_err != ENOENT)
- break;
-
- next_err = dmu_object_next(rwa->os, &obj, FALSE, 0);
- }
+ if (!dspp->savedok) {
+ memset(drr, 0, sizeof (dmu_replay_record_t));
+ drr->drr_type = DRR_END;
+ drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
+ drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
- if (err == 0) {
- if (free_err != 0 && free_err != ENOENT)
- err = free_err;
- else if (next_err != ESRCH)
- err = next_err;
- }
+ if (dump_record(&dsc, NULL, 0) != 0)
+ err = dsc.dsc_err;
}
+out:
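+ /* Unregister this stream from the dataset's list of active sends. */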
+ mutex_enter(&to_ds->ds_sendstream_lock);
+ list_remove(&to_ds->ds_sendstreams, dssp);
+ mutex_exit(&to_ds->ds_sendstream_lock);
- cv_destroy(&rwa->cv);
- mutex_destroy(&rwa->mutex);
- bqueue_destroy(&rwa->q);
- if (err == 0)
- err = rwa->err;
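+ /*
+ * On success we must have sent the BEGIN record and, unless this is a
+ * saved (partial) stream, the END record as well.
+ */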
+ VERIFY(err != 0 || (dsc.dsc_sent_begin &&
+ (dsc.dsc_sent_end || dspp->savedok)));
-out:
- nvlist_free(begin_nvl);
- if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
- zfs_onexit_fd_rele(cleanup_fd);
+ kmem_free(drr, sizeof (dmu_replay_record_t));
+ kmem_free(dssp, sizeof (dmu_sendstatus_t));
+ kmem_free(from_arg, sizeof (*from_arg));
+ kmem_free(to_arg, sizeof (*to_arg));
+ kmem_free(rlt_arg, sizeof (*rlt_arg));
+ kmem_free(smt_arg, sizeof (*smt_arg));
+ kmem_free(srt_arg, sizeof (*srt_arg));
- if (err != 0) {
- /*
- * Clean up references. If receive is not resumable,
- * destroy what we created, so we don't leave it in
- * the inconsistent state.
- */
- dmu_recv_cleanup_ds(drc);
- nvlist_free(drc->drc_keynvl);
+ dsl_dataset_long_rele(to_ds, FTAG);
+ if (from_rl != NULL) {
+ dsl_redaction_list_long_rele(from_rl, FTAG);
+ dsl_redaction_list_rele(from_rl, FTAG);
+ }
+ if (redact_rl != NULL) {
+ dsl_redaction_list_long_rele(redact_rl, FTAG);
+ dsl_redaction_list_rele(redact_rl, FTAG);
}
- *voffp = ra->voff;
- objlist_destroy(&ra->ignore_objlist);
- kmem_free(ra, sizeof (*ra));
- kmem_free(rwa, sizeof (*rwa));
return (err);
}
-static int
-dmu_recv_end_check(void *arg, dmu_tx_t *tx)
+int
+dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
+ boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
+ boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
+ dmu_send_outparams_t *dsop)
{
- dmu_recv_cookie_t *drc = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- int error;
-
- ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
+ int err;
+ dsl_dataset_t *fromds;
+ ds_hold_flags_t dsflags;
+ struct dmu_send_params dspp = {0};
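+
+ /* Package the caller's arguments for dmu_send_impl(). */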
+ dspp.embedok = embedok;
+ dspp.large_block_ok = large_block_ok;
+ dspp.compressok = compressok;
+ dspp.outfd = outfd;
+ dspp.off = off;
+ dspp.dso = dsop;
+ dspp.tag = FTAG;
+ dspp.rawok = rawok;
+ dspp.savedok = savedok;
+
+ dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
+ err = dsl_pool_hold(pool, FTAG, &dspp.dp);
+ if (err != 0)
+ return (err);
- if (!drc->drc_newfs) {
- dsl_dataset_t *origin_head;
+ err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
+ &dspp.to_ds);
+ if (err != 0) {
+ dsl_pool_rele(dspp.dp, FTAG);
+ return (err);
+ }
- error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
- if (error != 0)
- return (error);
- if (drc->drc_force) {
- /*
- * We will destroy any snapshots in tofs (i.e. before
- * origin_head) that are after the origin (which is
- * the snap before drc_ds, because drc_ds can not
- * have any snaps of its own).
- */
- uint64_t obj;
-
- obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
- while (obj !=
- dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
- dsl_dataset_t *snap;
- error = dsl_dataset_hold_obj(dp, obj, FTAG,
- &snap);
- if (error != 0)
- break;
- if (snap->ds_dir != origin_head->ds_dir)
- error = SET_ERROR(EINVAL);
- if (error == 0) {
- error = dsl_destroy_snapshot_check_impl(
- snap, B_FALSE);
- }
- obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
- dsl_dataset_rele(snap, FTAG);
- if (error != 0)
- break;
- }
- if (error != 0) {
- dsl_dataset_rele(origin_head, FTAG);
- return (error);
- }
+ if (fromsnap != 0) {
+ err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
+ FTAG, &fromds);
+ if (err != 0) {
+ dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
+ dsl_pool_rele(dspp.dp, FTAG);
+ return (err);
}
- if (drc->drc_keynvl != NULL) {
- error = dsl_crypto_recv_raw_key_check(drc->drc_ds,
- drc->drc_keynvl, tx);
- if (error != 0) {
- dsl_dataset_rele(origin_head, FTAG);
- return (error);
- }
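+ /*
+ * Fill in the ancestor bookmark so dmu_send_impl() knows where the
+ * incremental stream starts.
+ */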
+ dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
+ dspp.ancestor_zb.zbm_creation_txg =
+ dsl_dataset_phys(fromds)->ds_creation_txg;
+ dspp.ancestor_zb.zbm_creation_time =
+ dsl_dataset_phys(fromds)->ds_creation_time;
+
+ if (dsl_dataset_is_zapified(fromds)) {
+ (void) zap_lookup(dspp.dp->dp_meta_objset,
+ fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
+ &dspp.ancestor_zb.zbm_ivset_guid);
}
- error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
- origin_head, drc->drc_force, drc->drc_owner, tx);
- if (error != 0) {
- dsl_dataset_rele(origin_head, FTAG);
- return (error);
+ /* See dmu_send for the reasons behind this. */
+ uint64_t *fromredact;
+
+ if (!dsl_dataset_get_uint64_array_feature(fromds,
+ SPA_FEATURE_REDACTED_DATASETS,
+ &dspp.numfromredactsnaps,
+ &fromredact)) {
+ dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
+ } else if (dspp.numfromredactsnaps > 0) {
+ uint64_t size = dspp.numfromredactsnaps *
+ sizeof (uint64_t);
+ dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
+ memcpy(dspp.fromredactsnaps, fromredact, size);
}
- error = dsl_dataset_snapshot_check_impl(origin_head,
- drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
- dsl_dataset_rele(origin_head, FTAG);
- if (error != 0)
- return (error);
- error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
+ boolean_t is_before =
+ dsl_dataset_is_before(dspp.to_ds, fromds, 0);
+ dspp.is_clone = (dspp.to_ds->ds_dir !=
+ fromds->ds_dir);
+ dsl_dataset_rele(fromds, FTAG);
+ if (!is_before) {
+ dsl_pool_rele(dspp.dp, FTAG);
+ err = SET_ERROR(EXDEV);
+ } else {
+ err = dmu_send_impl(&dspp);
+ }
} else {
- error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
- drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
+ dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
+ err = dmu_send_impl(&dspp);
}
- return (error);
+ if (dspp.fromredactsnaps)
+ kmem_free(dspp.fromredactsnaps,
+ dspp.numfromredactsnaps * sizeof (uint64_t));
+
+ dsl_dataset_rele(dspp.to_ds, FTAG);
+ return (err);
}
-static void
-dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
+int
+dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
+ boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
+ boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
+ const char *redactbook, int outfd, offset_t *off,
+ dmu_send_outparams_t *dsop)
{
- dmu_recv_cookie_t *drc = arg;
- dsl_pool_t *dp = dmu_tx_pool(tx);
- boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
+ int err = 0;
+ ds_hold_flags_t dsflags;
+ boolean_t owned = B_FALSE;
+ dsl_dataset_t *fromds = NULL;
+ zfs_bookmark_phys_t book = {0};
+ struct dmu_send_params dspp = {0};
+
+ dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
+ dspp.tosnap = tosnap;
+ dspp.embedok = embedok;
+ dspp.large_block_ok = large_block_ok;
+ dspp.compressok = compressok;
+ dspp.outfd = outfd;
+ dspp.off = off;
+ dspp.dso = dsop;
+ dspp.tag = FTAG;
+ dspp.resumeobj = resumeobj;
+ dspp.resumeoff = resumeoff;
+ dspp.rawok = rawok;
+ dspp.savedok = savedok;
- spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
- tx, "snap=%s", drc->drc_tosnap);
- drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
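+ /* The incremental source must name a snapshot ('@') or bookmark ('#'). */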
+ if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
+ return (SET_ERROR(EINVAL));
- if (!drc->drc_newfs) {
- dsl_dataset_t *origin_head;
+ err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
+ if (err != 0)
+ return (err);
- VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
- &origin_head));
+ if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
+ /*
+ * We are sending a filesystem or volume. Ensure
+ * that it doesn't change by owning the dataset.
+ */
- if (drc->drc_force) {
+ if (savedok) {
/*
- * Destroy any snapshots of drc_tofs (origin_head)
- * after the origin (the snap before drc_ds).
+ * We are looking for the dataset that represents the
+ * partially received send stream. If this stream was
+ * received as a new snapshot of an existing dataset,
+ * this will be saved in a hidden clone named
+ * "<pool>/<dataset>/%recv". Otherwise, the stream
+ * will be saved in the live dataset itself. In
+ * either case we need to use dsl_dataset_own_force()
+ * because the stream is marked as inconsistent,
+ * which would normally make it unavailable to be
+ * owned.
*/
- uint64_t obj;
-
- obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
- while (obj !=
- dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
- dsl_dataset_t *snap;
- VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
- &snap));
- ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
- obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
- dsl_destroy_snapshot_sync_impl(snap,
- B_FALSE, tx);
- dsl_dataset_rele(snap, FTAG);
+ char *name = kmem_asprintf("%s/%s", tosnap,
+ recv_clone_name);
+ err = dsl_dataset_own_force(dspp.dp, name, dsflags,
+ FTAG, &dspp.to_ds);
+ if (err == ENOENT) {
+ err = dsl_dataset_own_force(dspp.dp, tosnap,
+ dsflags, FTAG, &dspp.to_ds);
}
+
+ if (err == 0) {
+ owned = B_TRUE;
+ err = zap_lookup(dspp.dp->dp_meta_objset,
+ dspp.to_ds->ds_object,
+ DS_FIELD_RESUME_TOGUID, 8, 1,
+ &dspp.saved_guid);
+ }
+
+ if (err == 0) {
+ err = zap_lookup(dspp.dp->dp_meta_objset,
+ dspp.to_ds->ds_object,
+ DS_FIELD_RESUME_TONAME, 1,
+ sizeof (dspp.saved_toname),
+ dspp.saved_toname);
+ }
+ /* Only disown if there was an error in the lookups */
+ if (owned && (err != 0))
+ dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
+
+ kmem_strfree(name);
+ } else {
+ err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
+ FTAG, &dspp.to_ds);
+ if (err == 0)
+ owned = B_TRUE;
}
- if (drc->drc_keynvl != NULL) {
- dsl_crypto_recv_raw_key_sync(drc->drc_ds,
- drc->drc_keynvl, tx);
- nvlist_free(drc->drc_keynvl);
- drc->drc_keynvl = NULL;
- }
+ } else {
+ err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
+ &dspp.to_ds);
+ }
- VERIFY3P(drc->drc_ds->ds_prev, ==, origin_head->ds_prev);
+ if (err != 0) {
+ /* Note: dsl dataset is not owned at this point */
+ dsl_pool_rele(dspp.dp, FTAG);
+ return (err);
+ }
- dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
- origin_head, tx);
- dsl_dataset_snapshot_sync_impl(origin_head,
- drc->drc_tosnap, tx);
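+ /*
+ * Resolve the redaction bookmark name relative to the dataset being
+ * sent ("pool/fs@snap" becomes "pool/fs#bookmark").
+ */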
+ if (redactbook != NULL) {
+ char path[ZFS_MAX_DATASET_NAME_LEN];
+ (void) strlcpy(path, tosnap, sizeof (path));
+ char *at = strchr(path, '@');
+ if (at == NULL) {
+ err = SET_ERROR(EINVAL);
+ } else {
+ (void) snprintf(at, sizeof (path) - (at - path), "#%s",
+ redactbook);
+ err = dsl_bookmark_lookup(dspp.dp, path,
+ NULL, &book);
+ dspp.redactbook = &book;
+ }
+ }
- /* set snapshot's creation time and guid */
- dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
- dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
- drc->drc_drrb->drr_creation_time;
- dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
- drc->drc_drrb->drr_toguid;
- dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
- ~DS_FLAG_INCONSISTENT;
+ if (err != 0) {
+ dsl_pool_rele(dspp.dp, FTAG);
+ if (owned)
+ dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
+ else
+ dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
+ return (err);
+ }
- dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
- dsl_dataset_phys(origin_head)->ds_flags &=
- ~DS_FLAG_INCONSISTENT;
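+ /*
+ * Work out the incremental source: a snapshot is held by name, while
+ * a bookmark (possibly a redaction bookmark) is resolved through
+ * dsl_bookmark_lookup().
+ */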
+ if (fromsnap != NULL) {
+ zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
+ int fsnamelen;
+ if (strpbrk(tosnap, "@#") != NULL)
+ fsnamelen = strpbrk(tosnap, "@#") - tosnap;
+ else
+ fsnamelen = strlen(tosnap);
- drc->drc_newsnapobj =
- dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
+ /*
+ * If the fromsnap is in a different filesystem, then
+ * mark the send stream as a clone.
+ */
+ if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
+ (fromsnap[fsnamelen] != '@' &&
+ fromsnap[fsnamelen] != '#')) {
+ dspp.is_clone = B_TRUE;
+ }
- dsl_dataset_rele(origin_head, FTAG);
- dsl_destroy_head_sync_impl(drc->drc_ds, tx);
+ if (strchr(fromsnap, '@') != NULL) {
+ err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
+ &fromds);
- if (drc->drc_owner != NULL)
- VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
- } else {
- dsl_dataset_t *ds = drc->drc_ds;
-
- dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
-
- /* set snapshot's creation time and guid */
- dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
- dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
- drc->drc_drrb->drr_creation_time;
- dsl_dataset_phys(ds->ds_prev)->ds_guid =
- drc->drc_drrb->drr_toguid;
- dsl_dataset_phys(ds->ds_prev)->ds_flags &=
- ~DS_FLAG_INCONSISTENT;
-
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
- if (dsl_dataset_has_resume_receive_state(ds)) {
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_FROMGUID, tx);
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_OBJECT, tx);
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_OFFSET, tx);
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_BYTES, tx);
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_TOGUID, tx);
- (void) zap_remove(dp->dp_meta_objset, ds->ds_object,
- DS_FIELD_RESUME_TONAME, tx);
+ if (err != 0) {
+ ASSERT3P(fromds, ==, NULL);
+ } else {
+ /*
+ * We need to make a deep copy of the redact
+ * snapshots of the from snapshot, because the
+ * array will be freed when we evict from_ds.
+ */
+ uint64_t *fromredact;
+ if (!dsl_dataset_get_uint64_array_feature(
+ fromds, SPA_FEATURE_REDACTED_DATASETS,
+ &dspp.numfromredactsnaps,
+ &fromredact)) {
+ dspp.numfromredactsnaps =
+ NUM_SNAPS_NOT_REDACTED;
+ } else if (dspp.numfromredactsnaps > 0) {
+ uint64_t size =
+ dspp.numfromredactsnaps *
+ sizeof (uint64_t);
+ dspp.fromredactsnaps = kmem_zalloc(size,
+ KM_SLEEP);
+ memcpy(dspp.fromredactsnaps, fromredact,
+ size);
+ }
+ if (!dsl_dataset_is_before(dspp.to_ds, fromds,
+ 0)) {
+ err = SET_ERROR(EXDEV);
+ } else {
+ zb->zbm_creation_txg =
+ dsl_dataset_phys(fromds)->
+ ds_creation_txg;
+ zb->zbm_creation_time =
+ dsl_dataset_phys(fromds)->
+ ds_creation_time;
+ zb->zbm_guid =
+ dsl_dataset_phys(fromds)->ds_guid;
+ zb->zbm_redaction_obj = 0;
+
+ if (dsl_dataset_is_zapified(fromds)) {
+ (void) zap_lookup(
+ dspp.dp->dp_meta_objset,
+ fromds->ds_object,
+ DS_FIELD_IVSET_GUID, 8, 1,
+ &zb->zbm_ivset_guid);
+ }
+ }
+ dsl_dataset_rele(fromds, FTAG);
+ }
+ } else {
+ dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
+ err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
+ zb);
+ if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
+ zb->zbm_guid ==
+ dsl_dataset_phys(dspp.to_ds)->ds_guid)
+ err = 0;
}
- drc->drc_newsnapobj =
- dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
- }
- zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);
- /*
- * Release the hold from dmu_recv_begin. This must be done before
- * we return to open context, so that when we free the dataset's dnode
- * we can evict its bonus buffer. Since the dataset may be destroyed
- * at this point (and therefore won't have a valid pointer to the spa)
- * we release the key mapping manually here while we do have a valid
- * pointer, if it exists.
- */
- if (!drc->drc_raw && encrypted) {
- (void) spa_keystore_remove_mapping(dmu_tx_pool(tx)->dp_spa,
- drc->drc_ds->ds_object, drc->drc_ds);
+ if (err == 0) {
+ /* dmu_send_impl will call dsl_pool_rele for us. */
+ err = dmu_send_impl(&dspp);
+ } else {
+ if (dspp.fromredactsnaps)
+ kmem_free(dspp.fromredactsnaps,
+ dspp.numfromredactsnaps *
+ sizeof (uint64_t));
+ dsl_pool_rele(dspp.dp, FTAG);
+ }
+ } else {
+ dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
+ err = dmu_send_impl(&dspp);
}
- dsl_dataset_disown(drc->drc_ds, 0, dmu_recv_tag);
- drc->drc_ds = NULL;
+ if (owned)
+ dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
+ else
+ dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
+ return (err);
}
static int
-add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
- boolean_t raw)
+dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
+ uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
- dsl_pool_t *dp;
- dsl_dataset_t *snapds;
- guid_map_entry_t *gmep;
- ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
- int err;
+ int err = 0;
+ uint64_t size;
+ /*
+ * Assume that space (both on-disk and in-stream) is dominated by
+ * data. We will adjust for indirect blocks and the copies property,
+ * but ignore per-object space used (eg, dnodes and DRR_OBJECT records).
+ */
- ASSERT(guid_map != NULL);
+ uint64_t recordsize;
+ uint64_t record_count;
+ objset_t *os;
+ VERIFY0(dmu_objset_from_ds(ds, &os));
- err = dsl_pool_hold(name, FTAG, &dp);
- if (err != 0)
- return (err);
- gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
- err = dsl_dataset_hold_obj_flags(dp, snapobj, dsflags, gmep, &snapds);
- if (err == 0) {
- gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
- gmep->raw = raw;
- gmep->gme_ds = snapds;
- avl_add(guid_map, gmep);
- dsl_dataset_long_hold(snapds, gmep);
+ /* Assume all (uncompressed) blocks are recordsize. */
+ if (zfs_override_estimate_recordsize != 0) {
+ recordsize = zfs_override_estimate_recordsize;
+ } else if (os->os_phys->os_type == DMU_OST_ZVOL) {
+ err = dsl_prop_get_int_ds(ds,
+ zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
- kmem_free(gmep, sizeof (*gmep));
+ err = dsl_prop_get_int_ds(ds,
+ zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
+ if (err != 0)
+ return (err);
+ record_count = uncompressed / recordsize;
- dsl_pool_rele(dp, FTAG);
- return (err);
-}
-
-static int dmu_recv_end_modified_blocks = 3;
+ /*
+ * If we're estimating a send size for a compressed stream, use the
+ * compressed data size to estimate the stream size. Otherwise, use the
+ * uncompressed data size.
+ */
+ size = stream_compressed ? compressed : uncompressed;
-static int
-dmu_recv_existing_end(dmu_recv_cookie_t *drc)
-{
-#ifdef _KERNEL
/*
- * We will be destroying the ds; make sure its origin is unmounted if
- * necessary.
+ * Subtract out approximate space used by indirect blocks.
+ * Assume most space is used by data blocks (non-indirect, non-dnode).
+ * Assume no ditto blocks or internal fragmentation.
+ *
+ * Therefore, space used by indirect blocks is sizeof(blkptr_t) per
+ * block.
*/
- char name[ZFS_MAX_DATASET_NAME_LEN];
- dsl_dataset_name(drc->drc_ds, name);
- zfs_destroy_unmount_origin(name);
-#endif
+ size -= record_count * sizeof (blkptr_t);
- return (dsl_sync_task(drc->drc_tofs,
- dmu_recv_end_check, dmu_recv_end_sync, drc,
- dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
-}
+ /* Add in the space for the record associated with each block. */
+ size += record_count * sizeof (dmu_replay_record_t);
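+
+ /*
+ * Worked example (illustrative sizes): with 128 KiB records, 1 GiB of
+ * data is 8192 records; subtracting 8192 blkptrs (128 bytes each) and
+ * adding 8192 replay record headers (roughly 300 bytes each) yields on
+ * the order of 1.4 MiB of stream overhead on top of the data itself.
+ */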
-static int
-dmu_recv_new_end(dmu_recv_cookie_t *drc)
-{
- return (dsl_sync_task(drc->drc_tofs,
- dmu_recv_end_check, dmu_recv_end_sync, drc,
- dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL));
+ *sizep = size;
+
+ return (0);
}
int
-dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
+dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
+ zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
+ boolean_t saved, uint64_t *sizep)
{
- int error;
+ int err;
+ dsl_dataset_t *ds = origds;
+ uint64_t uncomp, comp;
- drc->drc_owner = owner;
+ ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
+ ASSERT(fromds == NULL || frombook == NULL);
- if (drc->drc_newfs)
- error = dmu_recv_new_end(drc);
- else
- error = dmu_recv_existing_end(drc);
-
- if (error != 0) {
- dmu_recv_cleanup_ds(drc);
- nvlist_free(drc->drc_keynvl);
- } else if (drc->drc_guid_to_ds_map != NULL) {
- (void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
- drc->drc_newsnapobj, drc->drc_raw);
+ /*
+ * If this is a saved send we may actually be sending
+ * from the %recv clone used for resuming.
+ */
+ if (saved) {
+ objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
+ uint64_t guid;
+ char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
+
+ dsl_dataset_name(origds, dsname);
+ (void) strcat(dsname, "/");
+ (void) strlcat(dsname, recv_clone_name, sizeof (dsname));
+
+ err = dsl_dataset_hold(origds->ds_dir->dd_pool,
+ dsname, FTAG, &ds);
+ if (err != ENOENT && err != 0) {
+ return (err);
+ } else if (err == ENOENT) {
+ ds = origds;
+ }
+
+ /* check that this dataset has partially received data */
+ err = zap_lookup(mos, ds->ds_object,
+ DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
+ if (err != 0) {
+ err = SET_ERROR(err == ENOENT ? EINVAL : err);
+ goto out;
+ }
+
+ err = zap_lookup(mos, ds->ds_object,
+ DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
+ if (err != 0) {
+ err = SET_ERROR(err == ENOENT ? EINVAL : err);
+ goto out;
+ }
}
- return (error);
-}
-/*
- * Return TRUE if this objset is currently being received into.
- */
-boolean_t
-dmu_objset_is_receiving(objset_t *os)
-{
- return (os->os_dsl_dataset != NULL &&
- os->os_dsl_dataset->ds_owner == dmu_recv_tag);
+ /* tosnap must be a snapshot or the target of a saved send */
+ if (!ds->ds_is_snapshot && ds == origds)
+ return (SET_ERROR(EINVAL));
+
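+ /*
+ * Compute the space written since the incremental source (snapshot or
+ * bookmark); for a full send, use the dataset's total bytes.
+ */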
+ if (fromds != NULL) {
+ uint64_t used;
+ if (!fromds->ds_is_snapshot) {
+ err = SET_ERROR(EINVAL);
+ goto out;
+ }
+
+ if (!dsl_dataset_is_before(ds, fromds, 0)) {
+ err = SET_ERROR(EXDEV);
+ goto out;
+ }
+
+ err = dsl_dataset_space_written(fromds, ds, &used, &comp,
+ &uncomp);
+ if (err != 0)
+ goto out;
+ } else if (frombook != NULL) {
+ uint64_t used;
+ err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
+ &comp, &uncomp);
+ if (err != 0)
+ goto out;
+ } else {
+ uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
+ comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
+ }
+
+ err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
+ stream_compressed, sizep);
+ /*
+ * Add the size of the BEGIN and END records to the estimate.
+ */
+ *sizep += 2 * sizeof (dmu_replay_record_t);
+
+out:
+ if (ds != origds)
+ dsl_dataset_rele(ds, FTAG);
+ return (err);
}
-#if defined(_KERNEL)
-/* BEGIN CSTYLED */
-module_param(zfs_override_estimate_recordsize, ulong, 0644);
-MODULE_PARM_DESC(zfs_override_estimate_recordsize,
- "Record size calculation override for zfs send estimates");
-/* END CSTYLED */
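+/*
+ * On Linux these tunables appear under /sys/module/zfs/parameters/; for
+ * example, writing 33554432 to zfs_send_queue_length raises the prefetch
+ * budget to 32 MiB for subsequent sends.
+ */
+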
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
+ "Allow sending corrupt data");
-module_param(zfs_send_corrupt_data, int, 0644);
-MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, UINT, ZMOD_RW,
+ "Maximum send queue length");
-module_param(zfs_send_queue_length, int, 0644);
-MODULE_PARM_DESC(zfs_send_queue_length, "Maximum send queue length");
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
+ "Send unmodified spill blocks");
-module_param(zfs_recv_queue_length, int, 0644);
-MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");
-#endif
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, UINT, ZMOD_RW,
+ "Maximum send queue length for non-prefetch queues");
+
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, UINT, ZMOD_RW,
+ "Send queue fill fraction");
+
+ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, UINT, ZMOD_RW,
+ "Send queue fill fraction for non-prefetch queues");
+
+ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, UINT, ZMOD_RW,
+ "Override block size estimate with fixed size");