*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
+ * Copyright (c) 2014, Joyent, Inc. All rights reserved.
+ * Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
+#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
+#include <sys/dmu_send.h>
+#include <sys/dsl_destroy.h>
+#include <sys/blkptr.h>
+#include <sys/dsl_bookmark.h>
+#include <sys/zfeature.h>
+#include <sys/bqueue.h>
+#include <sys/zvol.h>
+#include <sys/policy.h>
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
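+/* Upper bounds, in bytes, on the record queues used by send and receive */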
+int zfs_send_queue_length = 16 * 1024 * 1024;
+int zfs_recv_queue_length = 16 * 1024 * 1024;
static char *dmu_recv_tag = "dmu_recv_tag";
+static const char *recv_clone_name = "%recv";
+
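+/* Number of bytes spanned by a block pointer at the given indirection level */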
+#define BP_SPAN(datablkszsec, indblkshift, level) \
+ (((uint64_t)datablkszsec) << (SPA_MINBLOCKSHIFT + \
+ (level) * (indblkshift - SPA_BLKPTRSHIFT)))
+
+struct send_thread_arg {
+ bqueue_t q;
+ dsl_dataset_t *ds; /* Dataset to traverse */
+ uint64_t fromtxg; /* Traverse from this txg */
+ int flags; /* flags to pass to traverse_dataset */
+ int error_code;
+ boolean_t cancel;
+};
-/*
- * The list of data whose inclusion in a send stream can be pending from
- * one call to backup_cb to another. Multiple calls to dump_free() and
- * dump_freeobjects() can be aggregated into a single DRR_FREE or
- * DRR_FREEOBJECTS replay record.
- */
-typedef enum {
- PENDING_NONE,
- PENDING_FREE,
- PENDING_FREEOBJECTS
-} pendop_t;
-
-struct backuparg {
- dmu_replay_record_t *drr;
- vnode_t *vp;
- offset_t *off;
- objset_t *os;
- zio_cksum_t zc;
- uint64_t toguid;
- int err;
- pendop_t pending_op;
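+/*
+ * A single block read by the traversal thread, passed to the main thread
+ * through a blocking queue.
+ */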
+struct send_block_record {
+ boolean_t eos_marker; /* Marks the end of the stream */
+ blkptr_t bp;
+ zbookmark_phys_t zb;
+ uint8_t indblkshift;
+ uint16_t datablkszsec;
+ bqueue_node_t ln;
};
-static int
-dump_bytes(struct backuparg *ba, void *buf, int len)
+typedef struct dump_bytes_io {
+ dmu_sendarg_t *dbi_dsp;
+ void *dbi_buf;
+ int dbi_len;
+} dump_bytes_io_t;
+
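+/*
+ * Taskq callback that performs the actual write to the stream's vnode and
+ * advances the stream offset under the dataset's sendstream lock.
+ */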
+static void
+dump_bytes_cb(void *arg)
{
+ dump_bytes_io_t *dbi = (dump_bytes_io_t *)arg;
+ dmu_sendarg_t *dsp = dbi->dbi_dsp;
+ dsl_dataset_t *ds = dsp->dsa_os->os_dsl_dataset;
ssize_t resid; /* have to get resid to get detailed errno */
- ASSERT3U(len % 8, ==, 0);
+ ASSERT0(dbi->dbi_len % 8);
- fletcher_4_incremental_native(buf, len, &ba->zc);
- ba->err = vn_rdwr(UIO_WRITE, ba->vp,
- (caddr_t)buf, len,
+ dsp->dsa_err = vn_rdwr(UIO_WRITE, dsp->dsa_vp,
+ (caddr_t)dbi->dbi_buf, dbi->dbi_len,
0, UIO_SYSSPACE, FAPPEND, RLIM64_INFINITY, CRED(), &resid);
- *ba->off += len;
- return (ba->err);
+
+ mutex_enter(&ds->ds_sendstream_lock);
+ *dsp->dsa_off += dbi->dbi_len;
+ mutex_exit(&ds->ds_sendstream_lock);
+}
+
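+/*
+ * Write a buffer to the send stream, either directly or through a taskq
+ * when kernel stacks are too small to call vn_rdwr() safely.
+ */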
+static int
+dump_bytes(dmu_sendarg_t *dsp, void *buf, int len)
+{
+ dump_bytes_io_t dbi;
+
+ dbi.dbi_dsp = dsp;
+ dbi.dbi_buf = buf;
+ dbi.dbi_len = len;
+
+#if defined(HAVE_LARGE_STACKS)
+ dump_bytes_cb(&dbi);
+#else
+ /*
+ * The vn_rdwr() call is performed in a taskq to ensure that there is
+ * always enough stack space to write safely to the target filesystem.
+ * The ZIO_TYPE_FREE threads are used because there can be a lot of
+ * them and they are used in vdev_file.c for a similar purpose.
+ */
+ spa_taskq_dispatch_sync(dmu_objset_spa(dsp->dsa_os), ZIO_TYPE_FREE,
+ ZIO_TASKQ_ISSUE, dump_bytes_cb, &dbi, TQ_SLEEP);
+#endif /* HAVE_LARGE_STACKS */
+
+ return (dsp->dsa_err);
+}
+
+/*
+ * For all record types except BEGIN, fill in the checksum (overlaid in
+ * drr_u.drr_checksum.drr_checksum). The checksum verifies everything
+ * up to the start of the checksum itself.
+ */
+static int
+dump_record(dmu_sendarg_t *dsp, void *payload, int payload_len)
+{
+ ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
+ ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
+ fletcher_4_incremental_native(dsp->dsa_drr,
+ offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
+ &dsp->dsa_zc);
+ if (dsp->dsa_drr->drr_type != DRR_BEGIN) {
+ ASSERT(ZIO_CHECKSUM_IS_ZERO(&dsp->dsa_drr->drr_u.
+ drr_checksum.drr_checksum));
+ dsp->dsa_drr->drr_u.drr_checksum.drr_checksum = dsp->dsa_zc;
+ }
+ fletcher_4_incremental_native(&dsp->dsa_drr->
+ drr_u.drr_checksum.drr_checksum,
+ sizeof (zio_cksum_t), &dsp->dsa_zc);
+ if (dump_bytes(dsp, dsp->dsa_drr, sizeof (dmu_replay_record_t)) != 0)
+ return (SET_ERROR(EINTR));
+ if (payload_len != 0) {
+ fletcher_4_incremental_native(payload, payload_len,
+ &dsp->dsa_zc);
+ if (dump_bytes(dsp, payload, payload_len) != 0)
+ return (SET_ERROR(EINTR));
+ }
+ return (0);
}
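+/*
+ * Emit a DRR_FREE record for the given range of an object, aggregating it
+ * with a pending DRR_FREE record when possible.
+ */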
static int
-dump_free(struct backuparg *ba, uint64_t object, uint64_t offset,
+dump_free(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
uint64_t length)
{
- struct drr_free *drrf = &(ba->drr->drr_u.drr_free);
+ struct drr_free *drrf = &(dsp->dsa_drr->drr_u.drr_free);
+
+ /*
+ * When we receive a free record, dbuf_free_range() assumes
+ * that the receiving system doesn't have any dbufs in the range
+ * being freed. This is always true because there is a one-record
+ * constraint: we only send one WRITE record for any given
+ * object+offset. We know that the one-record constraint is
+ * true because we always send data in increasing order by
+ * object,offset.
+ *
+ * If the increasing-order constraint ever changes, we should find
+ * another way to assert that the one-record constraint is still
+ * satisfied.
+ */
+ ASSERT(object > dsp->dsa_last_data_object ||
+ (object == dsp->dsa_last_data_object &&
+ offset > dsp->dsa_last_data_offset));
+
+ /*
+ * If we are doing a non-incremental send, then there can't
+ * be any data in the dataset we're receiving into. Therefore
+ * a free record would simply be a no-op. Save space by not
+ * sending it to begin with.
+ */
+ if (!dsp->dsa_incremental)
+ return (0);
if (length != -1ULL && offset + length < offset)
length = -1ULL;
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records.
*/
- if (ba->pending_op != PENDING_NONE && ba->pending_op != PENDING_FREE) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dsp->dsa_pending_op != PENDING_NONE &&
+ dsp->dsa_pending_op != PENDING_FREE) {
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
- if (ba->pending_op == PENDING_FREE) {
+ if (dsp->dsa_pending_op == PENDING_FREE) {
/*
* There should never be a PENDING_FREE if length is -1
* (because dump_dnode is the only place where this
return (0);
} else {
/* not a continuation. Push out pending record */
- if (dump_bytes(ba, ba->drr,
- sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
- bzero(ba->drr, sizeof (dmu_replay_record_t));
- ba->drr->drr_type = DRR_FREE;
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
drrf->drr_length = length;
- drrf->drr_toguid = ba->toguid;
+ drrf->drr_toguid = dsp->dsa_toguid;
if (length == -1ULL) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
} else {
- ba->pending_op = PENDING_FREE;
+ dsp->dsa_pending_op = PENDING_FREE;
}
return (0);
}
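+/*
+ * Emit a DRR_WRITE record for one block of data, flushing any pending
+ * aggregated record first.
+ */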
static int
-dump_data(struct backuparg *ba, dmu_object_type_t type,
+dump_write(dmu_sendarg_t *dsp, dmu_object_type_t type,
uint64_t object, uint64_t offset, int blksz, const blkptr_t *bp, void *data)
{
- struct drr_write *drrw = &(ba->drr->drr_u.drr_write);
+ struct drr_write *drrw = &(dsp->dsa_drr->drr_u.drr_write);
+ /*
+ * We send data in increasing object, offset order.
+ * See comment in dump_free() for details.
+ */
+ ASSERT(object > dsp->dsa_last_data_object ||
+ (object == dsp->dsa_last_data_object &&
+ offset > dsp->dsa_last_data_offset));
+ dsp->dsa_last_data_object = object;
+ dsp->dsa_last_data_offset = offset + blksz - 1;
/*
* If there is any kind of pending aggregation (currently either
* the stream, since aggregation can't be done across operations
* of different types.
*/
- if (ba->pending_op != PENDING_NONE) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dsp->dsa_pending_op != PENDING_NONE) {
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
- /* write a DATA record */
- bzero(ba->drr, sizeof (dmu_replay_record_t));
- ba->drr->drr_type = DRR_WRITE;
+ /* write a WRITE record */
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
- drrw->drr_toguid = ba->toguid;
- drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
- if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
- drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
- DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
- DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
- DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
- drrw->drr_key.ddk_cksum = bp->blk_cksum;
-
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- if (dump_bytes(ba, data, blksz) != 0)
- return (EINTR);
+ drrw->drr_toguid = dsp->dsa_toguid;
+ if (bp == NULL || BP_IS_EMBEDDED(bp)) {
+ /*
+ * There's no pre-computed checksum for partial-block
+ * writes or embedded BP's, so (like
+ * fletcher4-checksummed blocks) userland will have to
+ * compute a dedup-capable checksum itself.
+ */
+ drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
+ } else {
+ drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
+ if (zio_checksum_table[drrw->drr_checksumtype].ci_dedup)
+ drrw->drr_checksumflags |= DRR_CHECKSUM_DEDUP;
+ DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
+ DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
+ DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
+ drrw->drr_key.ddk_cksum = bp->blk_cksum;
+ }
+
+ if (dump_record(dsp, data, blksz) != 0)
+ return (SET_ERROR(EINTR));
return (0);
}
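+/*
+ * Emit a DRR_WRITE_EMBEDDED record carrying the payload encoded directly
+ * in the embedded block pointer.
+ */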
static int
-dump_spill(struct backuparg *ba, uint64_t object, int blksz, void *data)
+dump_write_embedded(dmu_sendarg_t *dsp, uint64_t object, uint64_t offset,
+ int blksz, const blkptr_t *bp)
{
- struct drr_spill *drrs = &(ba->drr->drr_u.drr_spill);
+ char buf[BPE_PAYLOAD_SIZE];
+ struct drr_write_embedded *drrw =
+ &(dsp->dsa_drr->drr_u.drr_write_embedded);
- if (ba->pending_op != PENDING_NONE) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
+ if (dsp->dsa_pending_op != PENDING_NONE) {
+ if (dump_record(dsp, NULL, 0) != 0)
return (EINTR);
- ba->pending_op = PENDING_NONE;
+ dsp->dsa_pending_op = PENDING_NONE;
+ }
+
+ ASSERT(BP_IS_EMBEDDED(bp));
+
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_WRITE_EMBEDDED;
+ drrw->drr_object = object;
+ drrw->drr_offset = offset;
+ drrw->drr_length = blksz;
+ drrw->drr_toguid = dsp->dsa_toguid;
+ drrw->drr_compression = BP_GET_COMPRESS(bp);
+ drrw->drr_etype = BPE_GET_ETYPE(bp);
+ drrw->drr_lsize = BPE_GET_LSIZE(bp);
+ drrw->drr_psize = BPE_GET_PSIZE(bp);
+
+ decode_embedded_bp_compressed(bp, buf);
+
+ if (dump_record(dsp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
+ return (EINTR);
+ return (0);
+}
+
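+/* Emit a DRR_SPILL record containing an object's spill block. */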
+static int
+dump_spill(dmu_sendarg_t *dsp, uint64_t object, int blksz, void *data)
+{
+ struct drr_spill *drrs = &(dsp->dsa_drr->drr_u.drr_spill);
+
+ if (dsp->dsa_pending_op != PENDING_NONE) {
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
/* write a SPILL record */
- bzero(ba->drr, sizeof (dmu_replay_record_t));
- ba->drr->drr_type = DRR_SPILL;
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
- drrs->drr_toguid = ba->toguid;
+ drrs->drr_toguid = dsp->dsa_toguid;
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)))
- return (EINTR);
- if (dump_bytes(ba, data, blksz))
- return (EINTR);
+ if (dump_record(dsp, data, blksz) != 0)
+ return (SET_ERROR(EINTR));
return (0);
}
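+/*
+ * Emit a DRR_FREEOBJECTS record for the given object range, aggregating it
+ * with a pending DRR_FREEOBJECTS record when possible.
+ */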
static int
-dump_freeobjects(struct backuparg *ba, uint64_t firstobj, uint64_t numobjs)
+dump_freeobjects(dmu_sendarg_t *dsp, uint64_t firstobj, uint64_t numobjs)
{
- struct drr_freeobjects *drrfo = &(ba->drr->drr_u.drr_freeobjects);
+ struct drr_freeobjects *drrfo = &(dsp->dsa_drr->drr_u.drr_freeobjects);
+
+ /* See comment in dump_free(). */
+ if (!dsp->dsa_incremental)
+ return (0);
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records.
*/
- if (ba->pending_op != PENDING_NONE &&
- ba->pending_op != PENDING_FREEOBJECTS) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dsp->dsa_pending_op != PENDING_NONE &&
+ dsp->dsa_pending_op != PENDING_FREEOBJECTS) {
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
- if (ba->pending_op == PENDING_FREEOBJECTS) {
+ if (dsp->dsa_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with pending one
return (0);
} else {
/* can't be aggregated. Push out pending record */
- if (dump_bytes(ba, ba->drr,
- sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
- bzero(ba->drr, sizeof (dmu_replay_record_t));
- ba->drr->drr_type = DRR_FREEOBJECTS;
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
- drrfo->drr_toguid = ba->toguid;
+ drrfo->drr_toguid = dsp->dsa_toguid;
- ba->pending_op = PENDING_FREEOBJECTS;
+ dsp->dsa_pending_op = PENDING_FREEOBJECTS;
return (0);
}
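+/*
+ * Emit a DRR_OBJECT record describing the dnode and its bonus buffer,
+ * then free anything past the end of the file.
+ */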
static int
-dump_dnode(struct backuparg *ba, uint64_t object, dnode_phys_t *dnp)
+dump_dnode(dmu_sendarg_t *dsp, uint64_t object, dnode_phys_t *dnp)
{
- struct drr_object *drro = &(ba->drr->drr_u.drr_object);
+ struct drr_object *drro = &(dsp->dsa_drr->drr_u.drr_object);
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
- return (dump_freeobjects(ba, object, 1));
+ return (dump_freeobjects(dsp, object, 1));
- if (ba->pending_op != PENDING_NONE) {
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
- ba->pending_op = PENDING_NONE;
+ if (dsp->dsa_pending_op != PENDING_NONE) {
+ if (dump_record(dsp, NULL, 0) != 0)
+ return (SET_ERROR(EINTR));
+ dsp->dsa_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
- bzero(ba->drr, sizeof (dmu_replay_record_t));
- ba->drr->drr_type = DRR_OBJECT;
+ bzero(dsp->dsa_drr, sizeof (dmu_replay_record_t));
+ dsp->dsa_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
- drro->drr_toguid = ba->toguid;
+ drro->drr_toguid = dsp->dsa_toguid;
- if (dump_bytes(ba, ba->drr, sizeof (dmu_replay_record_t)) != 0)
- return (EINTR);
+ if (!(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
+ drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
+ drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
- if (dump_bytes(ba, DN_BONUS(dnp), P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0)
- return (EINTR);
+ if (dump_record(dsp, DN_BONUS(dnp),
+ P2ROUNDUP(dnp->dn_bonuslen, 8)) != 0) {
+ return (SET_ERROR(EINTR));
+ }
- /* free anything past the end of the file */
- if (dump_free(ba, object, (dnp->dn_maxblkid + 1) *
- (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL))
- return (EINTR);
- if (ba->err)
- return (EINTR);
+ /* Free anything past the end of the file. */
+ if (dump_free(dsp, object, (dnp->dn_maxblkid + 1) *
+ (dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), -1ULL) != 0)
+ return (SET_ERROR(EINTR));
+ if (dsp->dsa_err != 0)
+ return (SET_ERROR(EINTR));
return (0);
}
-#define BP_SPAN(dnp, level) \
- (((uint64_t)dnp->dn_datablkszsec) << (SPA_MINBLOCKSHIFT + \
- (level) * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT)))
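+/*
+ * Decide whether a block's data may be sent as a DRR_WRITE_EMBEDDED
+ * record, based on the feature flags negotiated for this stream.
+ */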
+static boolean_t
+backup_do_embed(dmu_sendarg_t *dsp, const blkptr_t *bp)
+{
+ if (!BP_IS_EMBEDDED(bp))
+ return (B_FALSE);
-/* ARGSUSED */
+ /*
+ * Compression function must be legacy, or explicitly enabled.
+ */
+ if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
+ !(dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4)))
+ return (B_FALSE);
+
+ /*
+ * Embed type must be explicitly enabled.
+ */
+ switch (BPE_GET_ETYPE(bp)) {
+ case BP_EMBEDDED_TYPE_DATA:
+ if (dsp->dsa_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
+ return (B_TRUE);
+ break;
+ default:
+ return (B_FALSE);
+ }
+ return (B_FALSE);
+}
+
+/*
+ * This is the callback function to traverse_dataset that acts as the worker
+ * thread for dmu_send_impl.
+ */
+/* ARGSUSED */
+static int
+send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
+ const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
+{
+ struct send_thread_arg *sta = arg;
+ struct send_block_record *record;
+ uint64_t record_size;
+ int err = 0;
+
+ if (sta->cancel)
+ return (SET_ERROR(EINTR));
+
+ if (bp == NULL) {
+ ASSERT3U(zb->zb_level, ==, ZB_DNODE_LEVEL);
+ return (0);
+ } else if (zb->zb_level < 0) {
+ return (0);
+ }
+
+ record = kmem_zalloc(sizeof (struct send_block_record), KM_SLEEP);
+ record->eos_marker = B_FALSE;
+ record->bp = *bp;
+ record->zb = *zb;
+ record->indblkshift = dnp->dn_indblkshift;
+ record->datablkszsec = dnp->dn_datablkszsec;
+ record_size = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
+ bqueue_enqueue(&sta->q, record, record_size);
+
+ return (err);
+}
+
+/*
+ * This function kicks off the traverse_dataset. It also handles setting the
+ * error code of the thread in case something goes wrong, and pushes the End of
+ * Stream record when the traverse_dataset call has finished. If there is no
+ * dataset to traverse, the thread immediately pushes End of Stream marker.
+ */
+static void
+send_traverse_thread(void *arg)
+{
+ struct send_thread_arg *st_arg = arg;
+ int err;
+ struct send_block_record *data;
+
+ if (st_arg->ds != NULL) {
+ err = traverse_dataset(st_arg->ds, st_arg->fromtxg,
+ st_arg->flags, send_cb, arg);
+ if (err != EINTR)
+ st_arg->error_code = err;
+ }
+ data = kmem_zalloc(sizeof (*data), KM_SLEEP);
+ data->eos_marker = B_TRUE;
+ bqueue_enqueue(&st_arg->q, data, 1);
+}
+
+/*
+ * This function actually handles figuring out what kind of record needs to be
+ * dumped, reading the data (which has hopefully been prefetched), and calling
+ * the appropriate helper function.
+ */
static int
-backup_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
- const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
+do_dump(dmu_sendarg_t *dsa, struct send_block_record *data)
{
- struct backuparg *ba = arg;
+ dsl_dataset_t *ds = dmu_objset_ds(dsa->dsa_os);
+ const blkptr_t *bp = &data->bp;
+ const zbookmark_phys_t *zb = &data->zb;
+ uint8_t indblkshift = data->indblkshift;
+ uint16_t dblkszsec = data->datablkszsec;
+ spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
dmu_object_type_t type = bp ? BP_GET_TYPE(bp) : DMU_OT_NONE;
int err = 0;
+ dnode_phys_t *blk;
+ uint64_t dnobj;
- if (issig(JUSTLOOKING) && issig(FORREAL))
- return (EINTR);
+ ASSERT3U(zb->zb_level, >=, 0);
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object)) {
return (0);
- } else if (bp == NULL && zb->zb_object == DMU_META_DNODE_OBJECT) {
- uint64_t span = BP_SPAN(dnp, zb->zb_level);
+ } else if (BP_IS_HOLE(bp) &&
+ zb->zb_object == DMU_META_DNODE_OBJECT) {
+ uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
uint64_t dnobj = (zb->zb_blkid * span) >> DNODE_SHIFT;
- err = dump_freeobjects(ba, dnobj, span >> DNODE_SHIFT);
- } else if (bp == NULL) {
- uint64_t span = BP_SPAN(dnp, zb->zb_level);
- err = dump_free(ba, zb->zb_object, zb->zb_blkid * span, span);
+ err = dump_freeobjects(dsa, dnobj, span >> DNODE_SHIFT);
+ } else if (BP_IS_HOLE(bp)) {
+ uint64_t span = BP_SPAN(dblkszsec, indblkshift, zb->zb_level);
+ uint64_t offset = zb->zb_blkid * span;
+ err = dump_free(dsa, zb->zb_object, offset, span);
} else if (zb->zb_level > 0 || type == DMU_OT_OBJSET) {
return (0);
} else if (type == DMU_OT_DNODE) {
- dnode_phys_t *blk;
- int i;
int blksz = BP_GET_LSIZE(bp);
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
+ int i;
+
+ ASSERT0(zb->zb_level);
- if (dsl_read(NULL, spa, bp, pbuf,
- arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
- ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
- return (EIO);
+ if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
+ ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
+ &aflags, zb) != 0)
+ return (SET_ERROR(EIO));
blk = abuf->b_data;
+ dnobj = zb->zb_blkid * (blksz >> DNODE_SHIFT);
for (i = 0; i < blksz >> DNODE_SHIFT; i++) {
- uint64_t dnobj = (zb->zb_blkid <<
- (DNODE_BLOCK_SHIFT - DNODE_SHIFT)) + i;
- err = dump_dnode(ba, dnobj, blk+i);
- if (err)
+ err = dump_dnode(dsa, dnobj + i, blk + i);
+ if (err != 0)
break;
}
(void) arc_buf_remove_ref(abuf, &abuf);
} else if (type == DMU_OT_SA) {
- uint32_t aflags = ARC_WAIT;
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
int blksz = BP_GET_LSIZE(bp);
- if (arc_read_nolock(NULL, spa, bp,
- arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
- ZIO_FLAG_CANFAIL, &aflags, zb) != 0)
- return (EIO);
+ if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
+ ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
+ &aflags, zb) != 0)
+ return (SET_ERROR(EIO));
- err = dump_spill(ba, zb->zb_object, blksz, abuf->b_data);
+ err = dump_spill(dsa, zb->zb_object, blksz, abuf->b_data);
(void) arc_buf_remove_ref(abuf, &abuf);
- } else { /* it's a level-0 block of a regular object */
- uint32_t aflags = ARC_WAIT;
+ } else if (backup_do_embed(dsa, bp)) {
+ /* it's an embedded level-0 block of a regular object */
+ int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
+ ASSERT0(zb->zb_level);
+ err = dump_write_embedded(dsa, zb->zb_object,
+ zb->zb_blkid * blksz, blksz, bp);
+ } else {
+ /* it's a level-0 block of a regular object */
+ arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf;
- int blksz = BP_GET_LSIZE(bp);
+ int blksz = dblkszsec << SPA_MINBLOCKSHIFT;
+ uint64_t offset;
- if (dsl_read(NULL, spa, bp, pbuf,
- arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
- ZIO_FLAG_CANFAIL, &aflags, zb) != 0) {
+ ASSERT0(zb->zb_level);
+ if (arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
+ ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL,
+ &aflags, zb) != 0) {
if (zfs_send_corrupt_data) {
uint64_t *ptr;
/* Send a block filled with 0x"zfs badd bloc" */
abuf = arc_buf_alloc(spa, blksz, &abuf,
    ARC_BUFC_DATA);
for (ptr = abuf->b_data;
(char *)ptr < (char *)abuf->b_data + blksz;
ptr++)
- *ptr = 0x2f5baddb10c;
+ *ptr = 0x2f5baddb10cULL;
} else {
- return (EIO);
+ return (SET_ERROR(EIO));
}
}
- err = dump_data(ba, type, zb->zb_object, zb->zb_blkid * blksz,
- blksz, bp, abuf->b_data);
+ offset = zb->zb_blkid * blksz;
+
+ if (!(dsa->dsa_featureflags &
+ DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
+ blksz > SPA_OLD_MAXBLOCKSIZE) {
+ char *buf = abuf->b_data;
+ while (blksz > 0 && err == 0) {
+ int n = MIN(blksz, SPA_OLD_MAXBLOCKSIZE);
+ err = dump_write(dsa, type, zb->zb_object,
+ offset, n, NULL, buf);
+ offset += n;
+ buf += n;
+ blksz -= n;
+ }
+ } else {
+ err = dump_write(dsa, type, zb->zb_object,
+ offset, blksz, bp, abuf->b_data);
+ }
(void) arc_buf_remove_ref(abuf, &abuf);
}
return (err);
}
-int
-dmu_sendbackup(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
- vnode_t *vp, offset_t *off)
+/*
+ * Pop the new data off the queue, and free the old data.
+ */
+static struct send_block_record *
+get_next_record(bqueue_t *bq, struct send_block_record *data)
+{
+ struct send_block_record *tmp = bqueue_dequeue(bq);
+ kmem_free(data, sizeof (*data));
+ return (tmp);
+}
+
+/*
+ * Actually do the bulk of the work in a zfs send.
+ *
+ * Note: Releases dp using the specified tag.
+ */
+static int
+dmu_send_impl(void *tag, dsl_pool_t *dp, dsl_dataset_t *to_ds,
+ zfs_bookmark_phys_t *ancestor_zb, boolean_t is_clone, boolean_t embedok,
+ boolean_t large_block_ok, int outfd, vnode_t *vp, offset_t *off)
{
- dsl_dataset_t *ds = tosnap->os_dsl_dataset;
- dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
+ objset_t *os;
dmu_replay_record_t *drr;
- struct backuparg ba;
+ dmu_sendarg_t *dsp;
int err;
uint64_t fromtxg = 0;
+ uint64_t featureflags = 0;
+ struct send_thread_arg to_arg;
+ struct send_block_record *to_data;
- /* tosnap must be a snapshot */
- if (ds->ds_phys->ds_next_snap_obj == 0)
- return (EINVAL);
-
- /* fromsnap must be an earlier snapshot from the same fs as tosnap */
- if (fromds && (ds->ds_dir != fromds->ds_dir ||
- fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
- return (EXDEV);
-
- if (fromorigin) {
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
-
- if (fromsnap)
- return (EINVAL);
-
- if (dsl_dir_is_clone(ds->ds_dir)) {
- rw_enter(&dp->dp_config_rwlock, RW_READER);
- err = dsl_dataset_hold_obj(dp,
- ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
- rw_exit(&dp->dp_config_rwlock);
- if (err)
- return (err);
- } else {
- fromorigin = B_FALSE;
- }
+ err = dmu_objset_from_ds(to_ds, &os);
+ if (err != 0) {
+ dsl_pool_rele(dp, tag);
+ return (err);
}
-
drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
drr->drr_type = DRR_BEGIN;
drr->drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SUBSTREAM);
#ifdef _KERNEL
- if (dmu_objset_type(tosnap) == DMU_OST_ZFS) {
+ if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
- if (zfs_get_zplprop(tosnap, ZFS_PROP_VERSION, &version) != 0)
- return (EINVAL);
- if (version == ZPL_VERSION_SA) {
- DMU_SET_FEATUREFLAGS(
- drr->drr_u.drr_begin.drr_versioninfo,
- DMU_BACKUP_FEATURE_SA_SPILL);
+ if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0) {
+ kmem_free(drr, sizeof (dmu_replay_record_t));
+ dsl_pool_rele(dp, tag);
+ return (SET_ERROR(EINVAL));
+ }
+ if (version >= ZPL_VERSION_SA) {
+ featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
}
#endif
+ if (large_block_ok && to_ds->ds_feature_inuse[SPA_FEATURE_LARGE_BLOCKS])
+ featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
+ if (embedok &&
+ spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
+ featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
+ if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
+ featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA_LZ4;
+ }
+
+ DMU_SET_FEATUREFLAGS(drr->drr_u.drr_begin.drr_versioninfo,
+ featureflags);
+
drr->drr_u.drr_begin.drr_creation_time =
- ds->ds_phys->ds_creation_time;
- drr->drr_u.drr_begin.drr_type = tosnap->os_phys->os_type;
- if (fromorigin)
+ dsl_dataset_phys(to_ds)->ds_creation_time;
+ drr->drr_u.drr_begin.drr_type = dmu_objset_type(os);
+ if (is_clone)
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CLONE;
- drr->drr_u.drr_begin.drr_toguid = ds->ds_phys->ds_guid;
- if (ds->ds_phys->ds_flags & DS_FLAG_CI_DATASET)
+ drr->drr_u.drr_begin.drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
+ if (dsl_dataset_phys(to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_CI_DATA;
- if (fromds)
- drr->drr_u.drr_begin.drr_fromguid = fromds->ds_phys->ds_guid;
- dsl_dataset_name(ds, drr->drr_u.drr_begin.drr_toname);
+ if (ancestor_zb != NULL) {
+ drr->drr_u.drr_begin.drr_fromguid =
+ ancestor_zb->zbm_guid;
+ fromtxg = ancestor_zb->zbm_creation_txg;
+ }
+ dsl_dataset_name(to_ds, drr->drr_u.drr_begin.drr_toname);
+ if (!to_ds->ds_is_snapshot) {
+ (void) strlcat(drr->drr_u.drr_begin.drr_toname, "@--head--",
+ sizeof (drr->drr_u.drr_begin.drr_toname));
+ }
- if (fromds)
- fromtxg = fromds->ds_phys->ds_creation_txg;
- if (fromorigin)
- dsl_dataset_rele(fromds, FTAG);
+ dsp = kmem_zalloc(sizeof (dmu_sendarg_t), KM_SLEEP);
+
+ dsp->dsa_drr = drr;
+ dsp->dsa_vp = vp;
+ dsp->dsa_outfd = outfd;
+ dsp->dsa_proc = curproc;
+ dsp->dsa_os = os;
+ dsp->dsa_off = off;
+ dsp->dsa_toguid = dsl_dataset_phys(to_ds)->ds_guid;
+ dsp->dsa_pending_op = PENDING_NONE;
+ dsp->dsa_incremental = (ancestor_zb != NULL);
+ dsp->dsa_featureflags = featureflags;
+
+ mutex_enter(&to_ds->ds_sendstream_lock);
+ list_insert_head(&to_ds->ds_sendstreams, dsp);
+ mutex_exit(&to_ds->ds_sendstream_lock);
+
+ dsl_dataset_long_hold(to_ds, FTAG);
+ dsl_pool_rele(dp, tag);
+
+ if (dump_record(dsp, NULL, 0) != 0) {
+ err = dsp->dsa_err;
+ goto out;
+ }
- ba.drr = drr;
- ba.vp = vp;
- ba.os = tosnap;
- ba.off = off;
- ba.toguid = ds->ds_phys->ds_guid;
- ZIO_SET_CHECKSUM(&ba.zc, 0, 0, 0, 0);
- ba.pending_op = PENDING_NONE;
+ err = bqueue_init(&to_arg.q, zfs_send_queue_length,
+ offsetof(struct send_block_record, ln));
+ to_arg.error_code = 0;
+ to_arg.cancel = B_FALSE;
+ to_arg.ds = to_ds;
+ to_arg.fromtxg = fromtxg;
+ to_arg.flags = TRAVERSE_PRE | TRAVERSE_PREFETCH;
+ (void) thread_create(NULL, 0, send_traverse_thread, &to_arg, 0, curproc,
+ TS_RUN, minclsyspri);
+
+ to_data = bqueue_dequeue(&to_arg.q);
+
+ while (!to_data->eos_marker && err == 0) {
+ err = do_dump(dsp, to_data);
+ to_data = get_next_record(&to_arg.q, to_data);
+ if (issig(JUSTLOOKING) && issig(FORREAL))
+ err = EINTR;
+ }
- if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
- kmem_free(drr, sizeof (dmu_replay_record_t));
- return (ba.err);
+ if (err != 0) {
+ to_arg.cancel = B_TRUE;
+ while (!to_data->eos_marker) {
+ to_data = get_next_record(&to_arg.q, to_data);
+ }
}
+ kmem_free(to_data, sizeof (*to_data));
- err = traverse_dataset(ds, fromtxg, TRAVERSE_PRE | TRAVERSE_PREFETCH,
- backup_cb, &ba);
+ bqueue_destroy(&to_arg.q);
- if (ba.pending_op != PENDING_NONE)
- if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0)
- err = EINTR;
+ if (err == 0 && to_arg.error_code != 0)
+ err = to_arg.error_code;
- if (err) {
- if (err == EINTR && ba.err)
- err = ba.err;
- kmem_free(drr, sizeof (dmu_replay_record_t));
- return (err);
+ if (err != 0)
+ goto out;
+
+ if (dsp->dsa_pending_op != PENDING_NONE)
+ if (dump_record(dsp, NULL, 0) != 0)
+ err = SET_ERROR(EINTR);
+
+ if (err != 0) {
+ if (err == EINTR && dsp->dsa_err != 0)
+ err = dsp->dsa_err;
+ goto out;
}
bzero(drr, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
- drr->drr_u.drr_end.drr_checksum = ba.zc;
- drr->drr_u.drr_end.drr_toguid = ba.toguid;
+ drr->drr_u.drr_end.drr_checksum = dsp->dsa_zc;
+ drr->drr_u.drr_end.drr_toguid = dsp->dsa_toguid;
- if (dump_bytes(&ba, drr, sizeof (dmu_replay_record_t)) != 0) {
- kmem_free(drr, sizeof (dmu_replay_record_t));
- return (ba.err);
- }
+ if (dump_record(dsp, NULL, 0) != 0)
+ err = dsp->dsa_err;
+
+out:
+ mutex_enter(&to_ds->ds_sendstream_lock);
+ list_remove(&to_ds->ds_sendstreams, dsp);
+ mutex_exit(&to_ds->ds_sendstream_lock);
kmem_free(drr, sizeof (dmu_replay_record_t));
+ kmem_free(dsp, sizeof (dmu_sendarg_t));
- return (0);
+ dsl_dataset_long_rele(to_ds, FTAG);
+
+ return (err);
}
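+/*
+ * Generate a send stream from the snapshot with object number "tosnap",
+ * incremental from the snapshot with object number "fromsnap" if nonzero.
+ */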
int
-dmu_send_estimate(objset_t *tosnap, objset_t *fromsnap, boolean_t fromorigin,
- uint64_t *sizep)
+dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
+ boolean_t embedok, boolean_t large_block_ok,
+ int outfd, vnode_t *vp, offset_t *off)
{
- dsl_dataset_t *ds = tosnap->os_dsl_dataset;
- dsl_dataset_t *fromds = fromsnap ? fromsnap->os_dsl_dataset : NULL;
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
+ dsl_pool_t *dp;
+ dsl_dataset_t *ds;
+ dsl_dataset_t *fromds = NULL;
int err;
- uint64_t size, recordsize;
- /* tosnap must be a snapshot */
- if (ds->ds_phys->ds_next_snap_obj == 0)
- return (EINVAL);
+ err = dsl_pool_hold(pool, FTAG, &dp);
+ if (err != 0)
+ return (err);
- /* fromsnap must be an earlier snapshot from the same fs as tosnap */
- if (fromds && (ds->ds_dir != fromds->ds_dir ||
- fromds->ds_phys->ds_creation_txg >= ds->ds_phys->ds_creation_txg))
- return (EXDEV);
-
- if (fromorigin) {
- if (fromsnap)
- return (EINVAL);
-
- if (dsl_dir_is_clone(ds->ds_dir)) {
- rw_enter(&dp->dp_config_rwlock, RW_READER);
- err = dsl_dataset_hold_obj(dp,
- ds->ds_dir->dd_phys->dd_origin_obj, FTAG, &fromds);
- rw_exit(&dp->dp_config_rwlock);
- if (err)
- return (err);
- } else {
- fromorigin = B_FALSE;
+ err = dsl_dataset_hold_obj(dp, tosnap, FTAG, &ds);
+ if (err != 0) {
+ dsl_pool_rele(dp, FTAG);
+ return (err);
+ }
+
+ if (fromsnap != 0) {
+ zfs_bookmark_phys_t zb;
+ boolean_t is_clone;
+
+ err = dsl_dataset_hold_obj(dp, fromsnap, FTAG, &fromds);
+ if (err != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ dsl_pool_rele(dp, FTAG);
+ return (err);
}
+ if (!dsl_dataset_is_before(ds, fromds, 0))
+ err = SET_ERROR(EXDEV);
+ zb.zbm_creation_time =
+ dsl_dataset_phys(fromds)->ds_creation_time;
+ zb.zbm_creation_txg = dsl_dataset_phys(fromds)->ds_creation_txg;
+ zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
+ is_clone = (fromds->ds_dir != ds->ds_dir);
+ dsl_dataset_rele(fromds, FTAG);
+ err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
+ embedok, large_block_ok, outfd, vp, off);
+ } else {
+ err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
+ embedok, large_block_ok, outfd, vp, off);
}
+ dsl_dataset_rele(ds, FTAG);
+ return (err);
+}
- /* Get uncompressed size estimate of changed data. */
- if (fromds == NULL) {
- size = ds->ds_phys->ds_uncompressed_bytes;
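+/*
+ * Generate a send stream from the snapshot, filesystem, or volume named by
+ * "tosnap", incremental from the snapshot or bookmark named by "fromsnap"
+ * if it is non-NULL.
+ */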
+int
+dmu_send(const char *tosnap, const char *fromsnap,
+ boolean_t embedok, boolean_t large_block_ok,
+ int outfd, vnode_t *vp, offset_t *off)
+{
+ dsl_pool_t *dp;
+ dsl_dataset_t *ds;
+ int err;
+ boolean_t owned = B_FALSE;
+
+ if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
+ return (SET_ERROR(EINVAL));
+
+ err = dsl_pool_hold(tosnap, FTAG, &dp);
+ if (err != 0)
+ return (err);
+
+ if (strchr(tosnap, '@') == NULL && spa_writeable(dp->dp_spa)) {
+ /*
+ * We are sending a filesystem or volume. Ensure
+ * that it doesn't change by owning the dataset.
+ */
+ err = dsl_dataset_own(dp, tosnap, FTAG, &ds);
+ owned = B_TRUE;
} else {
- uint64_t used, comp;
- err = dsl_dataset_space_written(fromds, ds,
- &used, &comp, &size);
- if (fromorigin)
- dsl_dataset_rele(fromds, FTAG);
- if (err)
+ err = dsl_dataset_hold(dp, tosnap, FTAG, &ds);
+ }
+ if (err != 0) {
+ dsl_pool_rele(dp, FTAG);
+ return (err);
+ }
+
+ if (fromsnap != NULL) {
+ zfs_bookmark_phys_t zb;
+ boolean_t is_clone = B_FALSE;
+ int fsnamelen = strchr(tosnap, '@') - tosnap;
+
+ /*
+ * If the fromsnap is in a different filesystem, then
+ * mark the send stream as a clone.
+ */
+ if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
+ (fromsnap[fsnamelen] != '@' &&
+ fromsnap[fsnamelen] != '#')) {
+ is_clone = B_TRUE;
+ }
+
+ if (strchr(fromsnap, '@')) {
+ dsl_dataset_t *fromds;
+ err = dsl_dataset_hold(dp, fromsnap, FTAG, &fromds);
+ if (err == 0) {
+ if (!dsl_dataset_is_before(ds, fromds, 0))
+ err = SET_ERROR(EXDEV);
+ zb.zbm_creation_time =
+ dsl_dataset_phys(fromds)->ds_creation_time;
+ zb.zbm_creation_txg =
+ dsl_dataset_phys(fromds)->ds_creation_txg;
+ zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
+ is_clone = (ds->ds_dir != fromds->ds_dir);
+ dsl_dataset_rele(fromds, FTAG);
+ }
+ } else {
+ err = dsl_bookmark_lookup(dp, fromsnap, ds, &zb);
+ }
+ if (err != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ dsl_pool_rele(dp, FTAG);
return (err);
+ }
+ err = dmu_send_impl(FTAG, dp, ds, &zb, is_clone,
+ embedok, large_block_ok, outfd, vp, off);
+ } else {
+ err = dmu_send_impl(FTAG, dp, ds, NULL, B_FALSE,
+ embedok, large_block_ok, outfd, vp, off);
}
+ if (owned)
+ dsl_dataset_disown(ds, FTAG);
+ else
+ dsl_dataset_rele(ds, FTAG);
+ return (err);
+}
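+/*
+ * Adjust a raw estimate of changed data to account for indirect blocks,
+ * storing the result in *sizep.
+ */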
+static int
+dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t size,
+ uint64_t *sizep)
+{
+ int err;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block, which we observe in practice.
*/
- rw_enter(&dp->dp_config_rwlock, RW_READER);
- err = dsl_prop_get_ds(ds, "recordsize",
- sizeof (recordsize), 1, &recordsize, NULL);
- rw_exit(&dp->dp_config_rwlock);
- if (err)
+ uint64_t recordsize;
+ err = dsl_prop_get_int_ds(ds, "recordsize", &recordsize);
+ if (err != 0)
return (err);
size -= size / recordsize * sizeof (blkptr_t);
return (0);
}
-struct recvbeginsyncarg {
- const char *tofs;
- const char *tosnap;
- dsl_dataset_t *origin;
- uint64_t fromguid;
- dmu_objset_type_t type;
- void *tag;
- boolean_t force;
- uint64_t dsflags;
- char clonelastname[MAXNAMELEN];
- dsl_dataset_t *ds; /* the ds to recv into; returned from the syncfunc */
- cred_t *cr;
-};
-
-/* ARGSUSED */
-static int
-recv_new_check(void *arg1, void *arg2, dmu_tx_t *tx)
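+/*
+ * Estimate the size of a send stream from "fromds" to "ds" based on the
+ * uncompressed size of the changed data.
+ */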
+int
+dmu_send_estimate(dsl_dataset_t *ds, dsl_dataset_t *fromds, uint64_t *sizep)
{
- dsl_dir_t *dd = arg1;
- struct recvbeginsyncarg *rbsa = arg2;
- objset_t *mos = dd->dd_pool->dp_meta_objset;
- uint64_t val;
int err;
+ uint64_t size;
- err = zap_lookup(mos, dd->dd_phys->dd_child_dir_zapobj,
- strrchr(rbsa->tofs, '/') + 1, sizeof (uint64_t), 1, &val);
-
- if (err != ENOENT)
- return (err ? err : EEXIST);
+ ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
- if (rbsa->origin) {
- /* make sure it's a snap in the same pool */
- if (rbsa->origin->ds_dir->dd_pool != dd->dd_pool)
- return (EXDEV);
- if (!dsl_dataset_is_snapshot(rbsa->origin))
- return (EINVAL);
- if (rbsa->origin->ds_phys->ds_guid != rbsa->fromguid)
- return (ENODEV);
- }
-
- return (0);
-}
+ /* tosnap must be a snapshot */
+ if (!ds->ds_is_snapshot)
+ return (SET_ERROR(EINVAL));
-static void
-recv_new_sync(void *arg1, void *arg2, dmu_tx_t *tx)
-{
- dsl_dir_t *dd = arg1;
- struct recvbeginsyncarg *rbsa = arg2;
- uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
- uint64_t dsobj;
+ /* fromsnap, if provided, must be a snapshot */
+ if (fromds != NULL && !fromds->ds_is_snapshot)
+ return (SET_ERROR(EINVAL));
- /* Create and open new dataset. */
- dsobj = dsl_dataset_create_sync(dd, strrchr(rbsa->tofs, '/') + 1,
- rbsa->origin, flags, rbsa->cr, tx);
- VERIFY(0 == dsl_dataset_own_obj(dd->dd_pool, dsobj,
- B_TRUE, dmu_recv_tag, &rbsa->ds));
+ /*
+ * fromsnap must be an earlier snapshot from the same fs as tosnap,
+ * or the origin's fs.
+ */
+ if (fromds != NULL && !dsl_dataset_is_before(ds, fromds, 0))
+ return (SET_ERROR(EXDEV));
- if (rbsa->origin == NULL) {
- (void) dmu_objset_create_impl(dd->dd_pool->dp_spa,
- rbsa->ds, &rbsa->ds->ds_phys->ds_bp, rbsa->type, tx);
+ /* Get uncompressed size estimate of changed data. */
+ if (fromds == NULL) {
+ size = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
+ } else {
+ uint64_t used, comp;
+ err = dsl_dataset_space_written(fromds, ds,
+ &used, &comp, &size);
+ if (err != 0)
+ return (err);
}
- spa_history_log_internal(LOG_DS_REPLAY_FULL_SYNC,
- dd->dd_pool->dp_spa, tx, "dataset = %lld", dsobj);
+ err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
+ return (err);
}
+/*
+ * Simple callback used to traverse the blocks of a snapshot and sum their
+ * uncompressed size
+ */
/* ARGSUSED */
static int
-recv_existing_check(void *arg1, void *arg2, dmu_tx_t *tx)
+dmu_calculate_send_traversal(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
+ const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
- dsl_dataset_t *ds = arg1;
- struct recvbeginsyncarg *rbsa = arg2;
- int err;
- uint64_t val;
-
- /* must not have any changes since most recent snapshot */
- if (!rbsa->force && dsl_dataset_modified_since_lastsnap(ds))
- return (ETXTBSY);
+ uint64_t *spaceptr = arg;
+ if (bp != NULL && !BP_IS_HOLE(bp)) {
+ *spaceptr += BP_GET_UCSIZE(bp);
+ }
+ return (0);
+}
- /* new snapshot name must not exist */
- err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
- ds->ds_phys->ds_snapnames_zapobj, rbsa->tosnap, 8, 1, &val);
- if (err == 0)
- return (EEXIST);
- if (err != ENOENT)
- return (err);
+/*
+ * Given a destination snapshot and a TXG, calculate the approximate size of a
+ * send stream sent from that TXG. from_txg may be zero, indicating that the
+ * whole snapshot will be sent.
+ */
+int
+dmu_send_estimate_from_txg(dsl_dataset_t *ds, uint64_t from_txg,
+ uint64_t *sizep)
+{
+ int err;
+ uint64_t size = 0;
- if (rbsa->fromguid) {
- /* if incremental, most recent snapshot must match fromguid */
- if (ds->ds_prev == NULL)
- return (ENODEV);
+ ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
- /*
- * most recent snapshot must match fromguid, or there are no
- * changes since the fromguid one
- */
- if (ds->ds_prev->ds_phys->ds_guid != rbsa->fromguid) {
- uint64_t birth = ds->ds_prev->ds_phys->ds_bp.blk_birth;
- uint64_t obj = ds->ds_prev->ds_phys->ds_prev_snap_obj;
- while (obj != 0) {
- dsl_dataset_t *snap;
- err = dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
- obj, FTAG, &snap);
- if (err)
- return (ENODEV);
- if (snap->ds_phys->ds_creation_txg < birth) {
- dsl_dataset_rele(snap, FTAG);
- return (ENODEV);
- }
- if (snap->ds_phys->ds_guid == rbsa->fromguid) {
- dsl_dataset_rele(snap, FTAG);
- break; /* it's ok */
- }
- obj = snap->ds_phys->ds_prev_snap_obj;
- dsl_dataset_rele(snap, FTAG);
- }
- if (obj == 0)
- return (ENODEV);
- }
- } else {
- /* if full, most recent snapshot must be $ORIGIN */
- if (ds->ds_phys->ds_prev_snap_txg >= TXG_INITIAL)
- return (ENODEV);
- }
+ /* tosnap must be a snapshot */
+ if (!ds->ds_is_snapshot)
+ return (SET_ERROR(EINVAL));
- /* temporary clone name must not exist */
- err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset,
- ds->ds_dir->dd_phys->dd_child_dir_zapobj,
- rbsa->clonelastname, 8, 1, &val);
- if (err == 0)
- return (EEXIST);
- if (err != ENOENT)
+ /* verify that from_txg is before the provided snapshot was taken */
+ if (from_txg >= dsl_dataset_phys(ds)->ds_creation_txg) {
+ return (SET_ERROR(EXDEV));
+ }
+ /*
+ * traverse the blocks of the snapshot with birth times after
+ * from_txg, summing their uncompressed size
+ */
+ err = traverse_dataset(ds, from_txg, TRAVERSE_POST,
+ dmu_calculate_send_traversal, &size);
+ if (err)
return (err);
- return (0);
+ err = dmu_adjust_send_estimate_for_indirects(ds, size, sizep);
+ return (err);
}
-/* ARGSUSED */
-static void
-recv_existing_sync(void *arg1, void *arg2, dmu_tx_t *tx)
+typedef struct dmu_recv_begin_arg {
+ const char *drba_origin;
+ dmu_recv_cookie_t *drba_cookie;
+ cred_t *drba_cred;
+ uint64_t drba_snapobj;
+} dmu_recv_begin_arg_t;
+
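+/*
+ * Check that a receive into an existing filesystem is permitted: the
+ * temporary clone and new snapshot names must be unused, the snapshot
+ * limit must not be exceeded, and an incremental stream's fromguid must
+ * match one of the filesystem's snapshots.
+ */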
+static int
+recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
+ uint64_t fromguid)
{
- dsl_dataset_t *ohds = arg1;
- struct recvbeginsyncarg *rbsa = arg2;
- dsl_pool_t *dp = ohds->ds_dir->dd_pool;
- dsl_dataset_t *cds;
- uint64_t flags = DS_FLAG_INCONSISTENT | rbsa->dsflags;
- uint64_t dsobj;
+ uint64_t val;
+ int error;
+ dsl_pool_t *dp = ds->ds_dir->dd_pool;
+
+ /* temporary clone name must not exist */
+ error = zap_lookup(dp->dp_meta_objset,
+ dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
+ 8, 1, &val);
+ if (error != ENOENT)
+ return (error == 0 ? EBUSY : error);
- /* create and open the temporary clone */
- dsobj = dsl_dataset_create_sync(ohds->ds_dir, rbsa->clonelastname,
- ohds->ds_prev, flags, rbsa->cr, tx);
- VERIFY(0 == dsl_dataset_own_obj(dp, dsobj, B_TRUE, dmu_recv_tag, &cds));
+ /* new snapshot name must not exist */
+ error = zap_lookup(dp->dp_meta_objset,
+ dsl_dataset_phys(ds)->ds_snapnames_zapobj,
+ drba->drba_cookie->drc_tosnap, 8, 1, &val);
+ if (error != ENOENT)
+ return (error == 0 ? EEXIST : error);
/*
- * If we actually created a non-clone, we need to create the
- * objset in our new dataset.
+ * Check snapshot limit before receiving. We'll recheck again at the
+ * end, but might as well abort before receiving if we're already over
+ * the limit.
+ *
+ * Note that we do not check the file system limit with
+ * dsl_dir_fscount_check because the temporary %clones don't count
+ * against that limit.
*/
- if (BP_IS_HOLE(dsl_dataset_get_blkptr(cds))) {
- (void) dmu_objset_create_impl(dp->dp_spa,
- cds, dsl_dataset_get_blkptr(cds), rbsa->type, tx);
- }
-
- rbsa->ds = cds;
+ error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
+ NULL, drba->drba_cred);
+ if (error != 0)
+ return (error);
+
+ if (fromguid != 0) {
+ dsl_dataset_t *snap;
+ uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
+
+ /* Find snapshot in this dir that matches fromguid. */
+ while (obj != 0) {
+ error = dsl_dataset_hold_obj(dp, obj, FTAG,
+ &snap);
+ if (error != 0)
+ return (SET_ERROR(ENODEV));
+ if (snap->ds_dir != ds->ds_dir) {
+ dsl_dataset_rele(snap, FTAG);
+ return (SET_ERROR(ENODEV));
+ }
+ if (dsl_dataset_phys(snap)->ds_guid == fromguid)
+ break;
+ obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
+ dsl_dataset_rele(snap, FTAG);
+ }
+ if (obj == 0)
+ return (SET_ERROR(ENODEV));
- spa_history_log_internal(LOG_DS_REPLAY_INC_SYNC,
- dp->dp_spa, tx, "dataset = %lld", dsobj);
-}
+ if (drba->drba_cookie->drc_force) {
+ drba->drba_snapobj = obj;
+ } else {
+ /*
+ * If we are not forcing, there must be no
+ * changes since fromsnap.
+ */
+ if (dsl_dataset_modified_since_snap(ds, snap)) {
+ dsl_dataset_rele(snap, FTAG);
+ return (SET_ERROR(ETXTBSY));
+ }
+ drba->drba_snapobj = ds->ds_prev->ds_object;
+ }
-static boolean_t
-dmu_recv_verify_features(dsl_dataset_t *ds, struct drr_begin *drrb)
-{
- int featureflags;
+ dsl_dataset_rele(snap, FTAG);
+ } else {
+ /* if full, then must be forced */
+ if (!drba->drba_cookie->drc_force)
+ return (SET_ERROR(EEXIST));
+ /* start from $ORIGIN@$ORIGIN, if supported */
+ drba->drba_snapobj = dp->dp_origin_snap != NULL ?
+ dp->dp_origin_snap->ds_object : 0;
+ }
- featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
+ return (0);
- /* Verify pool version supports SA if SA_SPILL feature set */
- return ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
- (spa_version(dsl_dataset_get_spa(ds)) < SPA_VERSION_SA));
}
-/*
- * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
- * succeeds; otherwise we will leak the holds on the datasets.
- */
-int
-dmu_recv_begin(char *tofs, char *tosnap, char *top_ds, struct drr_begin *drrb,
- boolean_t force, objset_t *origin, dmu_recv_cookie_t *drc)
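+/*
+ * Check phase of the dmu_recv_begin() sync task: validate the BEGIN record
+ * against the pool's features and the state of the destination dataset.
+ */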
+static int
+dmu_recv_begin_check(void *arg, dmu_tx_t *tx)
{
- int err = 0;
- boolean_t byteswap;
- struct recvbeginsyncarg rbsa = { 0 };
- uint64_t versioninfo;
- int flags;
+ dmu_recv_begin_arg_t *drba = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+ struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
+ uint64_t fromguid = drrb->drr_fromguid;
+ int flags = drrb->drr_flags;
+ int error;
+ uint64_t featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
dsl_dataset_t *ds;
+ const char *tofs = drba->drba_cookie->drc_tofs;
- if (drrb->drr_magic == DMU_BACKUP_MAGIC)
- byteswap = FALSE;
- else if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
- byteswap = TRUE;
- else
- return (EINVAL);
+ /* already checked */
+ ASSERT3U(drrb->drr_magic, ==, DMU_BACKUP_MAGIC);
- rbsa.tofs = tofs;
- rbsa.tosnap = tosnap;
- rbsa.origin = origin ? origin->os_dsl_dataset : NULL;
- rbsa.fromguid = drrb->drr_fromguid;
- rbsa.type = drrb->drr_type;
- rbsa.tag = FTAG;
- rbsa.dsflags = 0;
- rbsa.cr = CRED();
- versioninfo = drrb->drr_versioninfo;
- flags = drrb->drr_flags;
-
- if (byteswap) {
- rbsa.type = BSWAP_32(rbsa.type);
- rbsa.fromguid = BSWAP_64(rbsa.fromguid);
- versioninfo = BSWAP_64(versioninfo);
- flags = BSWAP_32(flags);
- }
-
- if (DMU_GET_STREAM_HDRTYPE(versioninfo) == DMU_COMPOUNDSTREAM ||
- rbsa.type >= DMU_OST_NUMTYPES ||
- ((flags & DRR_FLAG_CLONE) && origin == NULL))
- return (EINVAL);
+ if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
+ DMU_COMPOUNDSTREAM ||
+ drrb->drr_type >= DMU_OST_NUMTYPES ||
+ ((flags & DRR_FLAG_CLONE) && drba->drba_origin == NULL))
+ return (SET_ERROR(EINVAL));
- if (flags & DRR_FLAG_CI_DATA)
- rbsa.dsflags = DS_FLAG_CI_DATASET;
+ /* Verify pool version supports SA if SA_SPILL feature set */
+ if ((featureflags & DMU_BACKUP_FEATURE_SA_SPILL) &&
+ spa_version(dp->dp_spa) < SPA_VERSION_SA)
+ return (SET_ERROR(ENOTSUP));
- bzero(drc, sizeof (dmu_recv_cookie_t));
- drc->drc_drrb = drrb;
- drc->drc_tosnap = tosnap;
- drc->drc_top_ds = top_ds;
- drc->drc_force = force;
+ /*
+ * The receiving code doesn't know how to translate a WRITE_EMBEDDED
+ * record to a plan WRITE record, so the pool must have the
+ * EMBEDDED_DATA feature enabled if the stream has WRITE_EMBEDDED
+ * records. Same with WRITE_EMBEDDED records that use LZ4 compression.
+ */
+ if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
+ !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA))
+ return (SET_ERROR(ENOTSUP));
+ if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA_LZ4) &&
+ !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS))
+ return (SET_ERROR(ENOTSUP));
/*
- * Process the begin in syncing context.
+ * The receiving code doesn't know how to translate large blocks
+ * to smaller ones, so the pool must have the LARGE_BLOCKS
+ * feature enabled if the stream has LARGE_BLOCKS.
*/
+ if ((featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
+ !spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_LARGE_BLOCKS))
+ return (SET_ERROR(ENOTSUP));
- /* open the dataset we are logically receiving into */
- err = dsl_dataset_hold(tofs, dmu_recv_tag, &ds);
- if (err == 0) {
- if (dmu_recv_verify_features(ds, drrb)) {
- dsl_dataset_rele(ds, dmu_recv_tag);
- return (ENOTSUP);
- }
+ error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
+ if (error == 0) {
/* target fs already exists; recv into temp clone */
/* Can't recv a clone into an existing fs */
if (flags & DRR_FLAG_CLONE) {
- dsl_dataset_rele(ds, dmu_recv_tag);
- return (EINVAL);
- }
-
- /* must not have an incremental recv already in progress */
- if (!mutex_tryenter(&ds->ds_recvlock)) {
- dsl_dataset_rele(ds, dmu_recv_tag);
- return (EBUSY);
+ dsl_dataset_rele(ds, FTAG);
+ return (SET_ERROR(EINVAL));
}
- /* tmp clone name is: tofs/%tosnap" */
- (void) snprintf(rbsa.clonelastname, sizeof (rbsa.clonelastname),
- "%%%s", tosnap);
- rbsa.force = force;
- err = dsl_sync_task_do(ds->ds_dir->dd_pool,
- recv_existing_check, recv_existing_sync, ds, &rbsa, 5);
- if (err) {
- mutex_exit(&ds->ds_recvlock);
- dsl_dataset_rele(ds, dmu_recv_tag);
- return (err);
- }
- drc->drc_logical_ds = ds;
- drc->drc_real_ds = rbsa.ds;
- } else if (err == ENOENT) {
+ error = recv_begin_check_existing_impl(drba, ds, fromguid);
+ dsl_dataset_rele(ds, FTAG);
+ } else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
- char *cp;
+ char buf[MAXNAMELEN];
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
*/
- if (rbsa.fromguid && !(flags & DRR_FLAG_CLONE))
- return (ENOENT);
+ if (fromguid != 0 && !(flags & DRR_FLAG_CLONE ||
+ drba->drba_origin))
+ return (SET_ERROR(ENOENT));
/* Open the parent of tofs */
- cp = strrchr(tofs, '/');
- *cp = '\0';
- err = dsl_dataset_hold(tofs, FTAG, &ds);
- *cp = '/';
- if (err)
- return (err);
+ ASSERT3U(strlen(tofs), <, MAXNAMELEN);
+ (void) strlcpy(buf, tofs, strrchr(tofs, '/') - tofs + 1);
+ error = dsl_dataset_hold(dp, buf, FTAG, &ds);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Check filesystem and snapshot limits before receiving. We'll
+ * recheck snapshot limits again at the end (we create the
+ * filesystems and increment those counts during begin_sync).
+ */
+ error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
+ ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
+ if (error != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ return (error);
+ }
- if (dmu_recv_verify_features(ds, drrb)) {
+ error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
+ ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
+ if (error != 0) {
dsl_dataset_rele(ds, FTAG);
- return (ENOTSUP);
+ return (error);
+ }
+
+ if (drba->drba_origin != NULL) {
+ dsl_dataset_t *origin;
+ error = dsl_dataset_hold(dp, drba->drba_origin,
+ FTAG, &origin);
+ if (error != 0) {
+ dsl_dataset_rele(ds, FTAG);
+ return (error);
+ }
+ if (!origin->ds_is_snapshot) {
+ dsl_dataset_rele(origin, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ return (SET_ERROR(EINVAL));
+ }
+ if (dsl_dataset_phys(origin)->ds_guid != fromguid) {
+ dsl_dataset_rele(origin, FTAG);
+ dsl_dataset_rele(ds, FTAG);
+ return (SET_ERROR(ENODEV));
+ }
+ dsl_dataset_rele(origin, FTAG);
}
+ dsl_dataset_rele(ds, FTAG);
+ error = 0;
+ }
+ return (error);
+}
- err = dsl_sync_task_do(ds->ds_dir->dd_pool,
- recv_new_check, recv_new_sync, ds->ds_dir, &rbsa, 5);
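+/*
+ * Sync phase of the dmu_recv_begin() sync task: create the temporary clone
+ * or new dataset we will receive into, and mark it inconsistent.
+ */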
+static void
+dmu_recv_begin_sync(void *arg, dmu_tx_t *tx)
+{
+ dmu_recv_begin_arg_t *drba = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+ struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
+ const char *tofs = drba->drba_cookie->drc_tofs;
+ dsl_dataset_t *ds, *newds;
+ uint64_t dsobj;
+ int error;
+ uint64_t crflags;
+
+ crflags = (drrb->drr_flags & DRR_FLAG_CI_DATA) ?
+ DS_FLAG_CI_DATASET : 0;
+
+ error = dsl_dataset_hold(dp, tofs, FTAG, &ds);
+ if (error == 0) {
+ /* create temporary clone */
+ dsl_dataset_t *snap = NULL;
+ if (drba->drba_snapobj != 0) {
+ VERIFY0(dsl_dataset_hold_obj(dp,
+ drba->drba_snapobj, FTAG, &snap));
+ }
+ dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
+ snap, crflags, drba->drba_cred, tx);
+ if (drba->drba_snapobj != 0)
+ dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele(ds, FTAG);
- if (err)
- return (err);
- drc->drc_logical_ds = drc->drc_real_ds = rbsa.ds;
- drc->drc_newfs = B_TRUE;
+ } else {
+ dsl_dir_t *dd;
+ const char *tail;
+ dsl_dataset_t *origin = NULL;
+
+ VERIFY0(dsl_dir_hold(dp, tofs, FTAG, &dd, &tail));
+
+ if (drba->drba_origin != NULL) {
+ VERIFY0(dsl_dataset_hold(dp, drba->drba_origin,
+ FTAG, &origin));
+ }
+
+ /* Create new dataset. */
+ dsobj = dsl_dataset_create_sync(dd,
+ strrchr(tofs, '/') + 1,
+ origin, crflags, drba->drba_cred, tx);
+ if (origin != NULL)
+ dsl_dataset_rele(origin, FTAG);
+ dsl_dir_rele(dd, FTAG);
+ drba->drba_cookie->drc_newfs = B_TRUE;
}
+ VERIFY0(dsl_dataset_own_obj(dp, dsobj, dmu_recv_tag, &newds));
- return (err);
+ dmu_buf_will_dirty(newds->ds_dbuf, tx);
+ dsl_dataset_phys(newds)->ds_flags |= DS_FLAG_INCONSISTENT;
+
+ /*
+ * If we actually created a non-clone, we need to create the
+ * objset in our new dataset.
+ */
+ if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds))) {
+ (void) dmu_objset_create_impl(dp->dp_spa,
+ newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
+ }
+
+ drba->drba_cookie->drc_ds = newds;
+
+ spa_history_log_internal_ds(newds, "receive", tx, "");
}
-struct restorearg {
+/*
+ * NB: callers *MUST* call dmu_recv_stream() if dmu_recv_begin()
+ * succeeds; otherwise we will leak the holds on the datasets.
+ */
+int
+dmu_recv_begin(char *tofs, char *tosnap, struct drr_begin *drrb,
+ boolean_t force, char *origin, dmu_recv_cookie_t *drc)
+{
+ dmu_recv_begin_arg_t drba = { 0 };
+ dmu_replay_record_t *drr;
+
+ bzero(drc, sizeof (dmu_recv_cookie_t));
+ drc->drc_drrb = drrb;
+ drc->drc_tosnap = tosnap;
+ drc->drc_tofs = tofs;
+ drc->drc_force = force;
+ drc->drc_cred = CRED();
+
+ if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
+ drc->drc_byteswap = B_TRUE;
+ else if (drrb->drr_magic != DMU_BACKUP_MAGIC)
+ return (SET_ERROR(EINVAL));
+
+ drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
+ drr->drr_type = DRR_BEGIN;
+ drr->drr_u.drr_begin = *drc->drc_drrb;
+ if (drc->drc_byteswap) {
+ fletcher_4_incremental_byteswap(drr,
+ sizeof (dmu_replay_record_t), &drc->drc_cksum);
+ } else {
+ fletcher_4_incremental_native(drr,
+ sizeof (dmu_replay_record_t), &drc->drc_cksum);
+ }
+ kmem_free(drr, sizeof (dmu_replay_record_t));
+
+ if (drc->drc_byteswap) {
+ drrb->drr_magic = BSWAP_64(drrb->drr_magic);
+ drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
+ drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
+ drrb->drr_type = BSWAP_32(drrb->drr_type);
+ drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
+ drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
+ }
+
+ drba.drba_origin = origin;
+ drba.drba_cookie = drc;
+ drba.drba_cred = CRED();
+
+ return (dsl_sync_task(tofs, dmu_recv_begin_check, dmu_recv_begin_sync,
+ &drba, 5, ZFS_SPACE_CHECK_NORMAL));
+}
+
+struct receive_record_arg {
+ dmu_replay_record_t header;
+ void *payload; /* Pointer to a buffer containing the payload */
+ /*
+ * If the record is a write, pointer to the arc_buf_t containing the
+ * payload.
+ */
+ arc_buf_t *write_buf;
+ int payload_size;
+ boolean_t eos_marker; /* Marks the end of the stream */
+ bqueue_node_t node;
+};
+
+struct receive_writer_arg {
+ objset_t *os;
+ boolean_t byteswap;
+ bqueue_t q;
+ /*
+ * These three args are used to signal to the main thread that we're
+ * done.
+ */
+ kmutex_t mutex;
+ kcondvar_t cv;
+ boolean_t done;
int err;
- int byteswap;
- vnode_t *vp;
- char *buf;
- uint64_t voff;
- int bufsize; /* amount of memory allocated for buf */
- zio_cksum_t cksum;
+ /* A map from guid to dataset to help handle dedup'd streams. */
avl_tree_t *guid_to_ds_map;
};
+struct receive_arg {
+ objset_t *os;
+ vnode_t *vp; /* The vnode to read the stream from */
+ uint64_t voff; /* The current offset in the stream */
+ /*
+ * A record that has had its payload read in, but hasn't yet been handed
+ * off to the worker thread.
+ */
+ struct receive_record_arg *rrd;
+ /* A record that has had its header read in, but not its payload. */
+ struct receive_record_arg *next_rrd;
+ zio_cksum_t cksum;
+ zio_cksum_t prev_cksum;
+ int err;
+ boolean_t byteswap;
+ /* Sorted list of objects not to issue prefetches for. */
+ list_t ignore_obj_list;
+};
+
+struct receive_ign_obj_node {
+ list_node_t node;
+ uint64_t object;
+};
+
typedef struct guid_map_entry {
uint64_t guid;
dsl_dataset_t *gme_ds;
guid_map_entry_t *gmep;
while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
- dsl_dataset_rele(gmep->gme_ds, ca);
+ dsl_dataset_long_rele(gmep->gme_ds, gmep);
+ dsl_dataset_rele(gmep->gme_ds, gmep);
kmem_free(gmep, sizeof (guid_map_entry_t));
}
avl_destroy(ca);
kmem_free(ca, sizeof (avl_tree_t));
}
-static void *
-restore_read(struct restorearg *ra, int len)
+static int
+receive_read(struct receive_arg *ra, int len, void *buf)
{
- void *rv;
int done = 0;
/* some things will require 8-byte alignment, so everything must */
- ASSERT3U(len % 8, ==, 0);
+ ASSERT0(len % 8);
while (done < len) {
ssize_t resid;
ra->err = vn_rdwr(UIO_READ, ra->vp,
- (caddr_t)ra->buf + done, len - done,
+ (char *)buf + done, len - done,
ra->voff, UIO_SYSSPACE, FAPPEND,
RLIM64_INFINITY, CRED(), &resid);
if (resid == len - done)
- ra->err = EINVAL;
+ ra->err = SET_ERROR(EINVAL);
ra->voff += len - done - resid;
done = len - resid;
- if (ra->err)
- return (NULL);
+ if (ra->err != 0)
+ return (ra->err);
}
ASSERT3U(done, ==, len);
- rv = ra->buf;
- if (ra->byteswap)
- fletcher_4_incremental_byteswap(rv, len, &ra->cksum);
- else
- fletcher_4_incremental_native(rv, len, &ra->cksum);
- return (rv);
+ return (0);
}
noinline static void
-backup_byteswap(dmu_replay_record_t *drr)
+byteswap_record(dmu_replay_record_t *drr)
{
#define DO64(X) (drr->drr_u.X = BSWAP_64(drr->drr_u.X))
#define DO32(X) (drr->drr_u.X = BSWAP_32(drr->drr_u.X))
drr->drr_type = BSWAP_32(drr->drr_type);
drr->drr_payloadlen = BSWAP_32(drr->drr_payloadlen);
+
switch (drr->drr_type) {
case DRR_BEGIN:
DO64(drr_begin.drr_magic);
break;
case DRR_OBJECT:
DO64(drr_object.drr_object);
- /* DO64(drr_object.drr_allocation_txg); */
DO32(drr_object.drr_type);
DO32(drr_object.drr_bonustype);
DO32(drr_object.drr_blksz);
DO64(drr_write.drr_offset);
DO64(drr_write.drr_length);
DO64(drr_write.drr_toguid);
- DO64(drr_write.drr_key.ddk_cksum.zc_word[0]);
- DO64(drr_write.drr_key.ddk_cksum.zc_word[1]);
- DO64(drr_write.drr_key.ddk_cksum.zc_word[2]);
- DO64(drr_write.drr_key.ddk_cksum.zc_word[3]);
+ ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write.drr_key.ddk_cksum);
DO64(drr_write.drr_key.ddk_prop);
break;
case DRR_WRITE_BYREF:
DO64(drr_write_byref.drr_refguid);
DO64(drr_write_byref.drr_refobject);
DO64(drr_write_byref.drr_refoffset);
- DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[0]);
- DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[1]);
- DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[2]);
- DO64(drr_write_byref.drr_key.ddk_cksum.zc_word[3]);
+ ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
+ drr_key.ddk_cksum);
DO64(drr_write_byref.drr_key.ddk_prop);
break;
+ case DRR_WRITE_EMBEDDED:
+ DO64(drr_write_embedded.drr_object);
+ DO64(drr_write_embedded.drr_offset);
+ DO64(drr_write_embedded.drr_length);
+ DO64(drr_write_embedded.drr_toguid);
+ DO32(drr_write_embedded.drr_lsize);
+ DO32(drr_write_embedded.drr_psize);
+ break;
case DRR_FREE:
DO64(drr_free.drr_object);
DO64(drr_free.drr_offset);
DO64(drr_spill.drr_toguid);
break;
case DRR_END:
- DO64(drr_end.drr_checksum.zc_word[0]);
- DO64(drr_end.drr_checksum.zc_word[1]);
- DO64(drr_end.drr_checksum.zc_word[2]);
- DO64(drr_end.drr_checksum.zc_word[3]);
DO64(drr_end.drr_toguid);
+ ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_end.drr_checksum);
break;
default:
break;
}
+
+ if (drr->drr_type != DRR_BEGIN) {
+ ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_checksum.drr_checksum);
+ }
+
#undef DO64
#undef DO32
}
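+/*
+ * Deduce how many block pointers a dnode has from its bonus type and size:
+ * an SA bonus always leaves exactly one, otherwise each SPA_BLKPTRSHIFT-sized
+ * chunk of unused bonus space holds one more.  For example (assuming a
+ * 320-byte DN_MAX_BONUSLEN and 128-byte block pointers), a 64-byte non-SA
+ * bonus leaves 256 spare bytes, so the dnode has 1 + (256 >> 7) = 3 blkptrs.
+ */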
+static inline uint8_t
+deduce_nblkptr(dmu_object_type_t bonus_type, uint64_t bonus_size)
+{
+ if (bonus_type == DMU_OT_SA) {
+ return (1);
+ } else {
+ return (1 +
+ ((DN_MAX_BONUSLEN - bonus_size) >> SPA_BLKPTRSHIFT));
+ }
+}
+
noinline static int
-restore_object(struct restorearg *ra, objset_t *os, struct drr_object *drro)
+receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
+ void *data)
{
- int err;
+ dmu_object_info_t doi;
dmu_tx_t *tx;
- void *data = NULL;
+ uint64_t object;
+ int err;
if (drro->drr_type == DMU_OT_NONE ||
- drro->drr_type >= DMU_OT_NUMTYPES ||
- drro->drr_bonustype >= DMU_OT_NUMTYPES ||
+ !DMU_OT_IS_VALID(drro->drr_type) ||
+ !DMU_OT_IS_VALID(drro->drr_bonustype) ||
drro->drr_checksumtype >= ZIO_CHECKSUM_FUNCTIONS ||
drro->drr_compress >= ZIO_COMPRESS_FUNCTIONS ||
P2PHASE(drro->drr_blksz, SPA_MINBLOCKSIZE) ||
drro->drr_blksz < SPA_MINBLOCKSIZE ||
- drro->drr_blksz > SPA_MAXBLOCKSIZE ||
+ drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
drro->drr_bonuslen > DN_MAX_BONUSLEN) {
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
}
- err = dmu_object_info(os, drro->drr_object, NULL);
+ err = dmu_object_info(rwa->os, drro->drr_object, &doi);
if (err != 0 && err != ENOENT)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
+ object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT;
- if (drro->drr_bonuslen) {
- data = restore_read(ra, P2ROUNDUP(drro->drr_bonuslen, 8));
- if (ra->err)
- return (ra->err);
- }
+ /*
+ * If we are losing blkptrs or changing the block size this must
+ * be a new file instance. We must clear out the previous file
+ * contents before we can change this type of metadata in the dnode.
+ */
+ if (err == 0) {
+ int nblkptr;
- if (err == ENOENT) {
- /* currently free, want to be allocated */
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
- dmu_tx_abort(tx);
- return (err);
+ nblkptr = deduce_nblkptr(drro->drr_bonustype,
+ drro->drr_bonuslen);
+
+ if (drro->drr_blksz != doi.doi_data_block_size ||
+ nblkptr < doi.doi_nblkptr) {
+ err = dmu_free_long_range(rwa->os, drro->drr_object,
+ 0, DMU_OBJECT_END);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
}
- err = dmu_object_claim(os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
- drro->drr_bonustype, drro->drr_bonuslen, tx);
- dmu_tx_commit(tx);
- } else {
- /* currently allocated, want to be allocated */
- err = dmu_object_reclaim(os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
- drro->drr_bonustype, drro->drr_bonuslen);
- }
- if (err) {
- return (EINVAL);
}
- tx = dmu_tx_create(os);
- dmu_tx_hold_bonus(tx, drro->drr_object);
+ tx = dmu_tx_create(rwa->os);
+ dmu_tx_hold_bonus(tx, object);
err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
+ if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
- dmu_object_set_checksum(os, drro->drr_object, drro->drr_checksumtype,
- tx);
- dmu_object_set_compress(os, drro->drr_object, drro->drr_compress, tx);
+ if (object == DMU_NEW_OBJECT) {
+ /* currently free, want to be allocated */
+ err = dmu_object_claim(rwa->os, drro->drr_object,
+ drro->drr_type, drro->drr_blksz,
+ drro->drr_bonustype, drro->drr_bonuslen, tx);
+ } else if (drro->drr_type != doi.doi_type ||
+ drro->drr_blksz != doi.doi_data_block_size ||
+ drro->drr_bonustype != doi.doi_bonus_type ||
+ drro->drr_bonuslen != doi.doi_bonus_size) {
+ /* currently allocated, but with different properties */
+ err = dmu_object_reclaim(rwa->os, drro->drr_object,
+ drro->drr_type, drro->drr_blksz,
+ drro->drr_bonustype, drro->drr_bonuslen, tx);
+ }
+ if (err != 0) {
+ dmu_tx_commit(tx);
+ return (SET_ERROR(EINVAL));
+ }
+
+ dmu_object_set_checksum(rwa->os, drro->drr_object,
+ drro->drr_checksumtype, tx);
+ dmu_object_set_compress(rwa->os, drro->drr_object,
+ drro->drr_compress, tx);
if (data != NULL) {
dmu_buf_t *db;
- VERIFY(0 == dmu_bonus_hold(os, drro->drr_object, FTAG, &db));
+ VERIFY0(dmu_bonus_hold(rwa->os, drro->drr_object, FTAG, &db));
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
bcopy(data, db->db_data, drro->drr_bonuslen);
- if (ra->byteswap) {
- dmu_ot[drro->drr_bonustype].ot_byteswap(db->db_data,
+ if (rwa->byteswap) {
+ dmu_object_byteswap_t byteswap =
+ DMU_OT_BYTESWAP(drro->drr_bonustype);
+ dmu_ot_byteswap[byteswap].ob_func(db->db_data,
drro->drr_bonuslen);
}
dmu_buf_rele(db, FTAG);
/* ARGSUSED */
noinline static int
-restore_freeobjects(struct restorearg *ra, objset_t *os,
+receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
{
uint64_t obj;
if (drrfo->drr_firstobj + drrfo->drr_numobjs < drrfo->drr_firstobj)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
for (obj = drrfo->drr_firstobj;
obj < drrfo->drr_firstobj + drrfo->drr_numobjs;
- (void) dmu_object_next(os, &obj, FALSE, 0)) {
+ (void) dmu_object_next(rwa->os, &obj, FALSE, 0)) {
int err;
- if (dmu_object_info(os, obj, NULL) != 0)
+ if (dmu_object_info(rwa->os, obj, NULL) != 0)
continue;
- err = dmu_free_object(os, obj);
- if (err)
+ err = dmu_free_long_object(rwa->os, obj);
+ if (err != 0)
return (err);
}
return (0);
}
noinline static int
-restore_write(struct restorearg *ra, objset_t *os,
- struct drr_write *drrw)
+receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
+ arc_buf_t *abuf)
{
dmu_tx_t *tx;
- void *data;
+ dmu_buf_t *bonus;
int err;
if (drrw->drr_offset + drrw->drr_length < drrw->drr_offset ||
- drrw->drr_type >= DMU_OT_NUMTYPES)
- return (EINVAL);
+ !DMU_OT_IS_VALID(drrw->drr_type))
+ return (SET_ERROR(EINVAL));
- data = restore_read(ra, drrw->drr_length);
- if (data == NULL)
- return (ra->err);
+ if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
+ return (SET_ERROR(EINVAL));
- if (dmu_object_info(os, drrw->drr_object, NULL) != 0)
- return (EINVAL);
-
- tx = dmu_tx_create(os);
+ tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrw->drr_object,
drrw->drr_offset, drrw->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
+ if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
- if (ra->byteswap)
- dmu_ot[drrw->drr_type].ot_byteswap(data, drrw->drr_length);
- dmu_write(os, drrw->drr_object,
- drrw->drr_offset, drrw->drr_length, data, tx);
+ if (rwa->byteswap) {
+ dmu_object_byteswap_t byteswap =
+ DMU_OT_BYTESWAP(drrw->drr_type);
+ dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
+ drrw->drr_length);
+ }
+
+ if (dmu_bonus_hold(rwa->os, drrw->drr_object, FTAG, &bonus) != 0)
+ return (SET_ERROR(EINVAL));
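+	/* dmu_assign_arcbuf() takes ownership of the loaned arc_buf */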
+ dmu_assign_arcbuf(bonus, drrw->drr_offset, abuf, tx);
dmu_tx_commit(tx);
+ dmu_buf_rele(bonus, FTAG);
return (0);
}
* data from the stream to fulfill this write.
*/
static int
-restore_write_byref(struct restorearg *ra, objset_t *os,
+receive_write_byref(struct receive_writer_arg *rwa,
struct drr_write_byref *drrwbr)
{
dmu_tx_t *tx;
int err;
guid_map_entry_t gmesrch;
guid_map_entry_t *gmep;
- avl_index_t where;
+ avl_index_t where;
objset_t *ref_os = NULL;
dmu_buf_t *dbp;
if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
/*
* If the GUID of the referenced dataset is different from the
*/
if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
gmesrch.guid = drrwbr->drr_refguid;
- if ((gmep = avl_find(ra->guid_to_ds_map, &gmesrch,
+ if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
&where)) == NULL) {
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
}
if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
} else {
- ref_os = os;
+ ref_os = rwa->os;
}
err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
drrwbr->drr_refoffset, FTAG, &dbp, DMU_READ_PREFETCH);
- if (err)
+ if (err != 0)
return (err);
- tx = dmu_tx_create(os);
+ tx = dmu_tx_create(rwa->os);
dmu_tx_hold_write(tx, drrwbr->drr_object,
drrwbr->drr_offset, drrwbr->drr_length);
err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
+ if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
- dmu_write(os, drrwbr->drr_object,
+ dmu_write(rwa->os, drrwbr->drr_object,
drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
dmu_buf_rele(dbp, FTAG);
dmu_tx_commit(tx);
}
static int
-restore_spill(struct restorearg *ra, objset_t *os, struct drr_spill *drrs)
+receive_write_embedded(struct receive_writer_arg *rwa,
+ struct drr_write_embedded *drrwnp, void *data)
{
dmu_tx_t *tx;
- void *data;
- dmu_buf_t *db, *db_spill;
int err;
-	if (drrs->drr_length < SPA_MINBLOCKSIZE ||
-	    drrs->drr_length > SPA_MAXBLOCKSIZE)
-		return (EINVAL);
+	if (drrwnp->drr_offset + drrwnp->drr_length < drrwnp->drr_offset)
+		return (SET_ERROR(EINVAL));
- data = restore_read(ra, drrs->drr_length);
- if (data == NULL)
- return (ra->err);
+	if (drrwnp->drr_psize > BPE_PAYLOAD_SIZE)
+		return (SET_ERROR(EINVAL));
-	if (dmu_object_info(os, drrs->drr_object, NULL) != 0)
-		return (EINVAL);
+	if (drrwnp->drr_etype >= NUM_BP_EMBEDDED_TYPES)
+		return (SET_ERROR(EINVAL));
+	if (drrwnp->drr_compression >= ZIO_COMPRESS_FUNCTIONS)
+		return (SET_ERROR(EINVAL));
+
+ tx = dmu_tx_create(rwa->os);
+
+ dmu_tx_hold_write(tx, drrwnp->drr_object,
+ drrwnp->drr_offset, drrwnp->drr_length);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err != 0) {
+ dmu_tx_abort(tx);
+ return (err);
+ }
+
+ dmu_write_embedded(rwa->os, drrwnp->drr_object,
+ drrwnp->drr_offset, data, drrwnp->drr_etype,
+ drrwnp->drr_compression, drrwnp->drr_lsize, drrwnp->drr_psize,
+ rwa->byteswap ^ ZFS_HOST_BYTEORDER, tx);
+
+ dmu_tx_commit(tx);
+ return (0);
+}
- VERIFY(0 == dmu_bonus_hold(os, drrs->drr_object, FTAG, &db));
+static int
+receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
+ void *data)
+{
+ dmu_tx_t *tx;
+ dmu_buf_t *db, *db_spill;
+ int err;
+
+ if (drrs->drr_length < SPA_MINBLOCKSIZE ||
+ drrs->drr_length > spa_maxblocksize(dmu_objset_spa(rwa->os)))
+ return (SET_ERROR(EINVAL));
+
+ if (dmu_object_info(rwa->os, drrs->drr_object, NULL) != 0)
+ return (SET_ERROR(EINVAL));
+
+ VERIFY0(dmu_bonus_hold(rwa->os, drrs->drr_object, FTAG, &db));
if ((err = dmu_spill_hold_by_bonus(db, FTAG, &db_spill)) != 0) {
dmu_buf_rele(db, FTAG);
return (err);
}
- tx = dmu_tx_create(os);
+ tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
err = dmu_tx_assign(tx, TXG_WAIT);
- if (err) {
+ if (err != 0) {
dmu_buf_rele(db, FTAG);
dmu_buf_rele(db_spill, FTAG);
dmu_tx_abort(tx);
/* ARGSUSED */
noinline static int
-restore_free(struct restorearg *ra, objset_t *os,
- struct drr_free *drrf)
+receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
int err;
if (drrf->drr_length != -1ULL &&
drrf->drr_offset + drrf->drr_length < drrf->drr_offset)
- return (EINVAL);
+ return (SET_ERROR(EINVAL));
- if (dmu_object_info(os, drrf->drr_object, NULL) != 0)
- return (EINVAL);
+ if (dmu_object_info(rwa->os, drrf->drr_object, NULL) != 0)
+ return (SET_ERROR(EINVAL));
- err = dmu_free_long_range(os, drrf->drr_object,
+ err = dmu_free_long_range(rwa->os, drrf->drr_object,
drrf->drr_offset, drrf->drr_length);
+
return (err);
}
+/* used to destroy the drc_ds on error */
+static void
+dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
+{
+ char name[MAXNAMELEN];
+ dsl_dataset_name(drc->drc_ds, name);
+ dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
+ (void) dsl_destroy_head(name);
+}
+
+static void
+receive_cksum(struct receive_arg *ra, int len, void *buf)
+{
+ if (ra->byteswap) {
+ fletcher_4_incremental_byteswap(buf, len, &ra->cksum);
+ } else {
+ fletcher_4_incremental_native(buf, len, &ra->cksum);
+ }
+}
+
/*
- * NB: callers *must* call dmu_recv_end() if this succeeds.
+ * Read the payload into a buffer of size len, and update the current record's
+ * payload field.
+ * Allocate ra->next_rrd and read the next record's header into
+ * ra->next_rrd->header.
+ * Verify checksum of payload and next record.
*/
-int
-dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
- int cleanup_fd, uint64_t *action_handlep)
+static int
+receive_read_payload_and_next_header(struct receive_arg *ra, int len, void *buf)
{
- struct restorearg ra = { 0 };
- dmu_replay_record_t *drr;
- objset_t *os;
- zio_cksum_t pcksum;
- int featureflags;
+ int err;
+ zio_cksum_t cksum_orig;
+ zio_cksum_t *cksump;
+
+ if (len != 0) {
+ ASSERT3U(len, <=, SPA_MAXBLOCKSIZE);
+ ra->rrd->payload = buf;
+ ra->rrd->payload_size = len;
+ err = receive_read(ra, len, ra->rrd->payload);
+ if (err != 0)
+ return (err);
+ receive_cksum(ra, len, ra->rrd->payload);
+ }
+
+ ra->prev_cksum = ra->cksum;
+
+ ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
+ err = receive_read(ra, sizeof (ra->next_rrd->header),
+ &ra->next_rrd->header);
+ if (err != 0) {
+ kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
+ ra->next_rrd = NULL;
+ return (err);
+ }
+ if (ra->next_rrd->header.drr_type == DRR_BEGIN) {
+ kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
+ ra->next_rrd = NULL;
+ return (SET_ERROR(EINVAL));
+ }
+
+ /*
+ * Note: checksum is of everything up to but not including the
+ * checksum itself.
+ */
+ ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
+ ==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
+ receive_cksum(ra,
+ offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
+ &ra->next_rrd->header);
+
+ cksum_orig = ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
+ cksump = &ra->next_rrd->header.drr_u.drr_checksum.drr_checksum;
+
+ if (ra->byteswap)
+ byteswap_record(&ra->next_rrd->header);
+
+ if ((!ZIO_CHECKSUM_IS_ZERO(cksump)) &&
+ !ZIO_CHECKSUM_EQUAL(ra->cksum, *cksump)) {
+ kmem_free(ra->next_rrd, sizeof (*ra->next_rrd));
+ ra->next_rrd = NULL;
+ return (SET_ERROR(ECKSUM));
+ }
+
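+	/*
+	 * Fold the record's own (pre-byteswap) checksum bytes into the
+	 * running stream checksum, since the check above excluded them.
+	 */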
+ receive_cksum(ra, sizeof (cksum_orig), &cksum_orig);
+
+ return (0);
+}
+
+/*
+ * Issue the prefetch reads for any necessary indirect blocks.
+ *
+ * We use the object ignore list to tell us whether or not to issue prefetches
+ * for a given object. We do this for both correctness (in case the blocksize
+ * of an object has changed) and performance (if the object doesn't exist, don't
+ * needlessly try to issue prefetches). We also trim the list as we go through
+ * the stream to prevent it from growing to an unbounded size.
+ *
+ * The object numbers within will always be in sorted order, and any write
+ * records we see will also be in sorted order, but they're not sorted with
+ * respect to each other (i.e. we can get several object records before
+ * receiving each object's write records). As a result, once we've reached a
+ * given object number, we can safely remove any reference to lower object
+ * numbers in the ignore list. In practice, we receive up to 32 object records
+ * before receiving write records, so the list can have up to 32 nodes in it.
+ */
+/* ARGSUSED */
+static void
+receive_read_prefetch(struct receive_arg *ra,
+ uint64_t object, uint64_t offset, uint64_t length)
+{
+ struct receive_ign_obj_node *node = list_head(&ra->ignore_obj_list);
+ while (node != NULL && node->object < object) {
+ VERIFY3P(node, ==, list_remove_head(&ra->ignore_obj_list));
+ kmem_free(node, sizeof (*node));
+ node = list_head(&ra->ignore_obj_list);
+ }
+ if (node == NULL || node->object > object) {
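+		/* level 1: prefetch this range's indirect blocks */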
+ dmu_prefetch(ra->os, object, 1, offset, length,
+ ZIO_PRIORITY_SYNC_READ);
+ }
+}
- if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC))
- ra.byteswap = TRUE;
+/*
+ * Read records off the stream, issuing any necessary prefetches.
+ */
+static int
+receive_read_record(struct receive_arg *ra)
+{
+ int err;
+ switch (ra->rrd->header.drr_type) {
+ case DRR_OBJECT:
{
- /* compute checksum of drr_begin record */
- dmu_replay_record_t *drr;
- drr = kmem_zalloc(sizeof (dmu_replay_record_t), KM_SLEEP);
-
- drr->drr_type = DRR_BEGIN;
- drr->drr_u.drr_begin = *drc->drc_drrb;
- if (ra.byteswap) {
- fletcher_4_incremental_byteswap(drr,
- sizeof (dmu_replay_record_t), &ra.cksum);
- } else {
- fletcher_4_incremental_native(drr,
- sizeof (dmu_replay_record_t), &ra.cksum);
+ struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
+ uint32_t size = P2ROUNDUP(drro->drr_bonuslen, 8);
+ void *buf = kmem_zalloc(size, KM_SLEEP);
+ dmu_object_info_t doi;
+ err = receive_read_payload_and_next_header(ra, size, buf);
+ if (err != 0) {
+ kmem_free(buf, size);
+ return (err);
+ }
+ err = dmu_object_info(ra->os, drro->drr_object, &doi);
+ /*
+ * See receive_read_prefetch for an explanation why we're
+ * storing this object in the ignore_obj_list.
+ */
+ if (err == ENOENT ||
+ (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) {
+			struct receive_ign_obj_node *node =
+			    kmem_zalloc(sizeof (*node), KM_SLEEP);
+ node->object = drro->drr_object;
+#ifdef ZFS_DEBUG
+ {
+ struct receive_ign_obj_node *last_object =
+ list_tail(&ra->ignore_obj_list);
+ uint64_t last_objnum = (last_object != NULL ?
+ last_object->object : 0);
+ ASSERT3U(node->object, >, last_objnum);
+ }
+#endif
+ list_insert_tail(&ra->ignore_obj_list, node);
+ err = 0;
+ }
+ return (err);
+ }
+ case DRR_FREEOBJECTS:
+ {
+ err = receive_read_payload_and_next_header(ra, 0, NULL);
+ return (err);
+ }
+ case DRR_WRITE:
+ {
+ struct drr_write *drrw = &ra->rrd->header.drr_u.drr_write;
+ arc_buf_t *abuf = arc_loan_buf(dmu_objset_spa(ra->os),
+ drrw->drr_length);
+
+ err = receive_read_payload_and_next_header(ra,
+ drrw->drr_length, abuf->b_data);
+ if (err != 0) {
+ dmu_return_arcbuf(abuf);
+ return (err);
}
- kmem_free(drr, sizeof (dmu_replay_record_t));
+ ra->rrd->write_buf = abuf;
+ receive_read_prefetch(ra, drrw->drr_object, drrw->drr_offset,
+ drrw->drr_length);
+ return (err);
+ }
+ case DRR_WRITE_BYREF:
+ {
+ struct drr_write_byref *drrwb =
+ &ra->rrd->header.drr_u.drr_write_byref;
+ err = receive_read_payload_and_next_header(ra, 0, NULL);
+ receive_read_prefetch(ra, drrwb->drr_object, drrwb->drr_offset,
+ drrwb->drr_length);
+ return (err);
}
+ case DRR_WRITE_EMBEDDED:
+ {
+ struct drr_write_embedded *drrwe =
+ &ra->rrd->header.drr_u.drr_write_embedded;
+ uint32_t size = P2ROUNDUP(drrwe->drr_psize, 8);
+ void *buf = kmem_zalloc(size, KM_SLEEP);
+
+ err = receive_read_payload_and_next_header(ra, size, buf);
+ if (err != 0) {
+ kmem_free(buf, size);
+ return (err);
+ }
- if (ra.byteswap) {
- struct drr_begin *drrb = drc->drc_drrb;
- drrb->drr_magic = BSWAP_64(drrb->drr_magic);
- drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
- drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
- drrb->drr_type = BSWAP_32(drrb->drr_type);
- drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
- drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
+ receive_read_prefetch(ra, drrwe->drr_object, drrwe->drr_offset,
+ drrwe->drr_length);
+ return (err);
+ }
+ case DRR_FREE:
+ {
+ /*
+ * It might be beneficial to prefetch indirect blocks here, but
+ * we don't really have the data to decide for sure.
+ */
+ err = receive_read_payload_and_next_header(ra, 0, NULL);
+ return (err);
+ }
+ case DRR_END:
+ {
+ struct drr_end *drre = &ra->rrd->header.drr_u.drr_end;
+ if (!ZIO_CHECKSUM_EQUAL(ra->prev_cksum, drre->drr_checksum))
+ return (SET_ERROR(EINVAL));
+ return (0);
+ }
+ case DRR_SPILL:
+ {
+ struct drr_spill *drrs = &ra->rrd->header.drr_u.drr_spill;
+ void *buf = kmem_zalloc(drrs->drr_length, KM_SLEEP);
+ err = receive_read_payload_and_next_header(ra, drrs->drr_length,
+ buf);
+ if (err != 0)
+ kmem_free(buf, drrs->drr_length);
+ return (err);
}
+ default:
+ return (SET_ERROR(EINVAL));
+ }
+}
- ra.vp = vp;
- ra.voff = *voffp;
- ra.bufsize = 1<<20;
- ra.buf = vmem_alloc(ra.bufsize, KM_SLEEP);
+/*
+ * Commit the records to the pool.
+ */
+static int
+receive_process_record(struct receive_writer_arg *rwa,
+ struct receive_record_arg *rrd)
+{
+ int err;
+
+ switch (rrd->header.drr_type) {
+ case DRR_OBJECT:
+ {
+ struct drr_object *drro = &rrd->header.drr_u.drr_object;
+ err = receive_object(rwa, drro, rrd->payload);
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ return (err);
+ }
+ case DRR_FREEOBJECTS:
+ {
+ struct drr_freeobjects *drrfo =
+ &rrd->header.drr_u.drr_freeobjects;
+ return (receive_freeobjects(rwa, drrfo));
+ }
+ case DRR_WRITE:
+ {
+ struct drr_write *drrw = &rrd->header.drr_u.drr_write;
+ err = receive_write(rwa, drrw, rrd->write_buf);
+ /* if receive_write() is successful, it consumes the arc_buf */
+ if (err != 0)
+ dmu_return_arcbuf(rrd->write_buf);
+ rrd->write_buf = NULL;
+ rrd->payload = NULL;
+ return (err);
+ }
+ case DRR_WRITE_BYREF:
+ {
+ struct drr_write_byref *drrwbr =
+ &rrd->header.drr_u.drr_write_byref;
+ return (receive_write_byref(rwa, drrwbr));
+ }
+ case DRR_WRITE_EMBEDDED:
+ {
+ struct drr_write_embedded *drrwe =
+ &rrd->header.drr_u.drr_write_embedded;
+ err = receive_write_embedded(rwa, drrwe, rrd->payload);
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ return (err);
+ }
+ case DRR_FREE:
+ {
+ struct drr_free *drrf = &rrd->header.drr_u.drr_free;
+ return (receive_free(rwa, drrf));
+ }
+ case DRR_SPILL:
+ {
+ struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
+ err = receive_spill(rwa, drrs, rrd->payload);
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ return (err);
+ }
+ default:
+ return (SET_ERROR(EINVAL));
+ }
+}
+
+/*
+ * dmu_recv_stream's worker thread; pull records off the queue, and then call
+ * receive_process_record().  When we're done, signal the main thread and
+ * exit.
+ */
+static void
+receive_writer_thread(void *arg)
+{
+ struct receive_writer_arg *rwa = arg;
+ struct receive_record_arg *rrd;
+ for (rrd = bqueue_dequeue(&rwa->q); !rrd->eos_marker;
+ rrd = bqueue_dequeue(&rwa->q)) {
+ /*
+ * If there's an error, the main thread will stop putting things
+ * on the queue, but we need to clear everything in it before we
+ * can exit.
+ */
+ if (rwa->err == 0) {
+ rwa->err = receive_process_record(rwa, rrd);
+ } else if (rrd->write_buf != NULL) {
+ dmu_return_arcbuf(rrd->write_buf);
+ rrd->write_buf = NULL;
+ rrd->payload = NULL;
+ } else if (rrd->payload != NULL) {
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ }
+ kmem_free(rrd, sizeof (*rrd));
+ }
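+	/* free the eos_marker record that ended the loop */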
+ kmem_free(rrd, sizeof (*rrd));
+ mutex_enter(&rwa->mutex);
+ rwa->done = B_TRUE;
+ cv_signal(&rwa->cv);
+ mutex_exit(&rwa->mutex);
+}
+
+/*
+ * Read in the stream's records, one by one, and apply them to the pool. There
+ * are two threads involved; the thread that calls this function will spin up a
+ * worker thread, read the records off the stream one by one, and issue
+ * prefetches for any necessary indirect blocks. It will then push the records
+ * onto an internal blocking queue. The worker thread will pull the records off
+ * the queue, and actually write the data into the DMU. This way, the worker
+ * thread doesn't have to wait for reads to complete, since everything it needs
+ * (the indirect blocks) will be prefetched.
+ *
+ * NB: callers *must* call dmu_recv_end() if this succeeds.
+ */
+int
+dmu_recv_stream(dmu_recv_cookie_t *drc, vnode_t *vp, offset_t *voffp,
+ int cleanup_fd, uint64_t *action_handlep)
+{
+ int err = 0;
+ struct receive_arg *ra;
+ struct receive_writer_arg *rwa;
+ int featureflags;
+ struct receive_ign_obj_node *n;
+
+ ra = kmem_zalloc(sizeof (*ra), KM_SLEEP);
+ rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
+
+ ra->byteswap = drc->drc_byteswap;
+ ra->cksum = drc->drc_cksum;
+ ra->vp = vp;
+ ra->voff = *voffp;
+ list_create(&ra->ignore_obj_list, sizeof (struct receive_ign_obj_node),
+ offsetof(struct receive_ign_obj_node, node));
/* these were verified in dmu_recv_begin */
- ASSERT(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo) ==
+ ASSERT3U(DMU_GET_STREAM_HDRTYPE(drc->drc_drrb->drr_versioninfo), ==,
DMU_SUBSTREAM);
- ASSERT(drc->drc_drrb->drr_type < DMU_OST_NUMTYPES);
+ ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
/*
* Open the objset we are modifying.
*/
- VERIFY(dmu_objset_from_ds(drc->drc_real_ds, &os) == 0);
+ VERIFY0(dmu_objset_from_ds(drc->drc_ds, &ra->os));
- ASSERT(drc->drc_real_ds->ds_phys->ds_flags & DS_FLAG_INCONSISTENT);
+ ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
featureflags = DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
minor_t minor;
if (cleanup_fd == -1) {
- ra.err = EBADF;
+ ra->err = SET_ERROR(EBADF);
goto out;
}
- ra.err = zfs_onexit_fd_hold(cleanup_fd, &minor);
- if (ra.err) {
+ ra->err = zfs_onexit_fd_hold(cleanup_fd, &minor);
+ if (ra->err != 0) {
cleanup_fd = -1;
goto out;
}
if (*action_handlep == 0) {
- ra.guid_to_ds_map =
+ rwa->guid_to_ds_map =
kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
- avl_create(ra.guid_to_ds_map, guid_compare,
+ avl_create(rwa->guid_to_ds_map, guid_compare,
sizeof (guid_map_entry_t),
offsetof(guid_map_entry_t, avlnode));
- ra.err = zfs_onexit_add_cb(minor,
- free_guid_map_onexit, ra.guid_to_ds_map,
+ err = zfs_onexit_add_cb(minor,
+ free_guid_map_onexit, rwa->guid_to_ds_map,
action_handlep);
- if (ra.err)
+		if (err != 0)
goto out;
} else {
- ra.err = zfs_onexit_cb_data(minor, *action_handlep,
- (void **)&ra.guid_to_ds_map);
- if (ra.err)
+ err = zfs_onexit_cb_data(minor, *action_handlep,
+ (void **)&rwa->guid_to_ds_map);
+		if (err != 0)
goto out;
}
- drc->drc_guid_to_ds_map = ra.guid_to_ds_map;
+ drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
}
+ err = receive_read_payload_and_next_header(ra, 0, NULL);
+ if (err)
+ goto out;
+
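+	/*
+	 * The queue holds at most zfs_recv_queue_length bytes of records;
+	 * each record is charged its struct size plus payload size when
+	 * enqueued below.
+	 */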
+ (void) bqueue_init(&rwa->q, zfs_recv_queue_length,
+ offsetof(struct receive_record_arg, node));
+ cv_init(&rwa->cv, NULL, CV_DEFAULT, NULL);
+ mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
+ rwa->os = ra->os;
+ rwa->byteswap = drc->drc_byteswap;
+
+ (void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
+ TS_RUN, minclsyspri);
/*
- * Read records and process them.
+ * We're reading rwa->err without locks, which is safe since we are the
+ * only reader, and the worker thread is the only writer. It's ok if we
+ * miss a write for an iteration or two of the loop, since the writer
+ * thread will keep freeing records we send it until we send it an eos
+ * marker.
+ *
+ * We can leave this loop in 3 ways: First, if rwa->err is
+ * non-zero. In that case, the writer thread will free the rrd we just
+ * pushed. Second, if we're interrupted; in that case, either it's the
+	 * first loop and ra->rrd was never allocated, or it's later, and
+	 * ra->rrd has been handed off to the writer thread who will free it.
+	 * Finally,
+ * if receive_read_record fails or we're at the end of the stream, then
+ * we free ra->rrd and exit.
*/
- pcksum = ra.cksum;
- while (ra.err == 0 &&
- NULL != (drr = restore_read(&ra, sizeof (*drr)))) {
+ while (rwa->err == 0) {
if (issig(JUSTLOOKING) && issig(FORREAL)) {
- ra.err = EINTR;
- goto out;
+ err = SET_ERROR(EINTR);
+ break;
}
- if (ra.byteswap)
- backup_byteswap(drr);
+ ASSERT3P(ra->rrd, ==, NULL);
+ ra->rrd = ra->next_rrd;
+ ra->next_rrd = NULL;
+ /* Allocates and loads header into ra->next_rrd */
+ err = receive_read_record(ra);
- switch (drr->drr_type) {
- case DRR_OBJECT:
- {
- /*
- * We need to make a copy of the record header,
- * because restore_{object,write} may need to
- * restore_read(), which will invalidate drr.
- */
- struct drr_object drro = drr->drr_u.drr_object;
- ra.err = restore_object(&ra, os, &drro);
- break;
- }
- case DRR_FREEOBJECTS:
- {
- struct drr_freeobjects drrfo =
- drr->drr_u.drr_freeobjects;
- ra.err = restore_freeobjects(&ra, os, &drrfo);
- break;
- }
- case DRR_WRITE:
- {
- struct drr_write drrw = drr->drr_u.drr_write;
- ra.err = restore_write(&ra, os, &drrw);
- break;
- }
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref drrwbr =
- drr->drr_u.drr_write_byref;
- ra.err = restore_write_byref(&ra, os, &drrwbr);
- break;
- }
- case DRR_FREE:
- {
- struct drr_free drrf = drr->drr_u.drr_free;
- ra.err = restore_free(&ra, os, &drrf);
- break;
- }
- case DRR_END:
- {
- struct drr_end drre = drr->drr_u.drr_end;
- /*
- * We compare against the *previous* checksum
- * value, because the stored checksum is of
- * everything before the DRR_END record.
- */
- if (!ZIO_CHECKSUM_EQUAL(drre.drr_checksum, pcksum))
- ra.err = ECKSUM;
- goto out;
- }
- case DRR_SPILL:
- {
- struct drr_spill drrs = drr->drr_u.drr_spill;
- ra.err = restore_spill(&ra, os, &drrs);
+ if (ra->rrd->header.drr_type == DRR_END || err != 0) {
+ kmem_free(ra->rrd, sizeof (*ra->rrd));
+ ra->rrd = NULL;
break;
}
- default:
- ra.err = EINVAL;
- goto out;
- }
- pcksum = ra.cksum;
+
+ bqueue_enqueue(&rwa->q, ra->rrd,
+ sizeof (struct receive_record_arg) + ra->rrd->payload_size);
+ ra->rrd = NULL;
}
- ASSERT(ra.err != 0);
+ if (ra->next_rrd == NULL)
+ ra->next_rrd = kmem_zalloc(sizeof (*ra->next_rrd), KM_SLEEP);
+ ra->next_rrd->eos_marker = B_TRUE;
+ bqueue_enqueue(&rwa->q, ra->next_rrd, 1);
+
+ mutex_enter(&rwa->mutex);
+ while (!rwa->done) {
+ cv_wait(&rwa->cv, &rwa->mutex);
+ }
+ mutex_exit(&rwa->mutex);
+
+ cv_destroy(&rwa->cv);
+ mutex_destroy(&rwa->mutex);
+ bqueue_destroy(&rwa->q);
+ if (err == 0)
+ err = rwa->err;
out:
if ((featureflags & DMU_BACKUP_FEATURE_DEDUP) && (cleanup_fd != -1))
zfs_onexit_fd_rele(cleanup_fd);
- if (ra.err != 0) {
+ if (err != 0) {
/*
* destroy what we created, so we don't leave it in the
* inconsistent restoring state.
*/
- txg_wait_synced(drc->drc_real_ds->ds_dir->dd_pool, 0);
-
- (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
- B_FALSE);
- if (drc->drc_real_ds != drc->drc_logical_ds) {
- mutex_exit(&drc->drc_logical_ds->ds_recvlock);
- dsl_dataset_rele(drc->drc_logical_ds, dmu_recv_tag);
- }
+ dmu_recv_cleanup_ds(drc);
}
- vmem_free(ra.buf, ra.bufsize);
- *voffp = ra.voff;
- return (ra.err);
-}
+ *voffp = ra->voff;
-struct recvendsyncarg {
- char *tosnap;
- uint64_t creation_time;
- uint64_t toguid;
-};
+ for (n = list_remove_head(&ra->ignore_obj_list); n != NULL;
+ n = list_remove_head(&ra->ignore_obj_list)) {
+ kmem_free(n, sizeof (*n));
+ }
+ list_destroy(&ra->ignore_obj_list);
+ kmem_free(ra, sizeof (*ra));
+ kmem_free(rwa, sizeof (*rwa));
+ return (err);
+}
static int
-recv_end_check(void *arg1, void *arg2, dmu_tx_t *tx)
+dmu_recv_end_check(void *arg, dmu_tx_t *tx)
{
- dsl_dataset_t *ds = arg1;
- struct recvendsyncarg *resa = arg2;
+ dmu_recv_cookie_t *drc = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+ int error;
+
+ ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
+
+ if (!drc->drc_newfs) {
+ dsl_dataset_t *origin_head;
- return (dsl_dataset_snapshot_check(ds, resa->tosnap, tx));
+ error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
+ if (error != 0)
+ return (error);
+ if (drc->drc_force) {
+ /*
+ * We will destroy any snapshots in tofs (i.e. before
+ * origin_head) that are after the origin (which is
+ * the snap before drc_ds, because drc_ds can not
+ * have any snaps of its own).
+ */
+ uint64_t obj;
+
+ obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
+ while (obj !=
+ dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
+ dsl_dataset_t *snap;
+ error = dsl_dataset_hold_obj(dp, obj, FTAG,
+ &snap);
+ if (error != 0)
+ break;
+ if (snap->ds_dir != origin_head->ds_dir)
+ error = SET_ERROR(EINVAL);
+ if (error == 0) {
+ error = dsl_destroy_snapshot_check_impl(
+ snap, B_FALSE);
+ }
+ obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
+ dsl_dataset_rele(snap, FTAG);
+ if (error != 0)
+ break;
+ }
+ if (error != 0) {
+ dsl_dataset_rele(origin_head, FTAG);
+ return (error);
+ }
+ }
+ error = dsl_dataset_clone_swap_check_impl(drc->drc_ds,
+ origin_head, drc->drc_force, drc->drc_owner, tx);
+ if (error != 0) {
+ dsl_dataset_rele(origin_head, FTAG);
+ return (error);
+ }
+ error = dsl_dataset_snapshot_check_impl(origin_head,
+ drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
+ dsl_dataset_rele(origin_head, FTAG);
+ if (error != 0)
+ return (error);
+
+ error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
+ } else {
+ error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
+ drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
+ }
+ return (error);
}
static void
-recv_end_sync(void *arg1, void *arg2, dmu_tx_t *tx)
+dmu_recv_end_sync(void *arg, dmu_tx_t *tx)
{
- dsl_dataset_t *ds = arg1;
- struct recvendsyncarg *resa = arg2;
+ dmu_recv_cookie_t *drc = arg;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+
+ spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
+ tx, "snap=%s", drc->drc_tosnap);
+
+ if (!drc->drc_newfs) {
+ dsl_dataset_t *origin_head;
+
+ VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
+ &origin_head));
- dsl_dataset_snapshot_sync(ds, resa->tosnap, tx);
+ if (drc->drc_force) {
+ /*
+ * Destroy any snapshots of drc_tofs (origin_head)
+ * after the origin (the snap before drc_ds).
+ */
+ uint64_t obj;
+
+ obj = dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
+ while (obj !=
+ dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj) {
+ dsl_dataset_t *snap;
+ VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG,
+ &snap));
+ ASSERT3P(snap->ds_dir, ==, origin_head->ds_dir);
+ obj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
+ dsl_destroy_snapshot_sync_impl(snap,
+ B_FALSE, tx);
+ dsl_dataset_rele(snap, FTAG);
+ }
+ }
+ VERIFY3P(drc->drc_ds->ds_prev, ==,
+ origin_head->ds_prev);
+
+ dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
+ origin_head, tx);
+ dsl_dataset_snapshot_sync_impl(origin_head,
+ drc->drc_tosnap, tx);
+
+ /* set snapshot's creation time and guid */
+ dmu_buf_will_dirty(origin_head->ds_prev->ds_dbuf, tx);
+ dsl_dataset_phys(origin_head->ds_prev)->ds_creation_time =
+ drc->drc_drrb->drr_creation_time;
+ dsl_dataset_phys(origin_head->ds_prev)->ds_guid =
+ drc->drc_drrb->drr_toguid;
+ dsl_dataset_phys(origin_head->ds_prev)->ds_flags &=
+ ~DS_FLAG_INCONSISTENT;
+
+ dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
+ dsl_dataset_phys(origin_head)->ds_flags &=
+ ~DS_FLAG_INCONSISTENT;
+
+ dsl_dataset_rele(origin_head, FTAG);
+ dsl_destroy_head_sync_impl(drc->drc_ds, tx);
+
+ if (drc->drc_owner != NULL)
+ VERIFY3P(origin_head->ds_owner, ==, drc->drc_owner);
+ } else {
+ dsl_dataset_t *ds = drc->drc_ds;
- /* set snapshot's creation time and guid */
- dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
- ds->ds_prev->ds_phys->ds_creation_time = resa->creation_time;
- ds->ds_prev->ds_phys->ds_guid = resa->toguid;
- ds->ds_prev->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
+ dsl_dataset_snapshot_sync_impl(ds, drc->drc_tosnap, tx);
- dmu_buf_will_dirty(ds->ds_dbuf, tx);
- ds->ds_phys->ds_flags &= ~DS_FLAG_INCONSISTENT;
+ /* set snapshot's creation time and guid */
+ dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
+ dsl_dataset_phys(ds->ds_prev)->ds_creation_time =
+ drc->drc_drrb->drr_creation_time;
+ dsl_dataset_phys(ds->ds_prev)->ds_guid =
+ drc->drc_drrb->drr_toguid;
+ dsl_dataset_phys(ds->ds_prev)->ds_flags &=
+ ~DS_FLAG_INCONSISTENT;
+
+ dmu_buf_will_dirty(ds->ds_dbuf, tx);
+ dsl_dataset_phys(ds)->ds_flags &= ~DS_FLAG_INCONSISTENT;
+ }
+ drc->drc_newsnapobj = dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
+ zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);
+ /*
+ * Release the hold from dmu_recv_begin. This must be done before
+ * we return to open context, so that when we free the dataset's dnode,
+ * we can evict its bonus buffer.
+ */
+ dsl_dataset_disown(drc->drc_ds, dmu_recv_tag);
+ drc->drc_ds = NULL;
}
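+/*
+ * Record the guid -> snapshot mapping that receive_write_byref() consults
+ * when receiving dedup'd streams.  The long hold taken here is released,
+ * along with the map itself, by free_guid_map_onexit().
+ */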
static int
-add_ds_to_guidmap(avl_tree_t *guid_map, dsl_dataset_t *ds)
+add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj)
{
- dsl_pool_t *dp = ds->ds_dir->dd_pool;
- uint64_t snapobj = ds->ds_phys->ds_prev_snap_obj;
+ dsl_pool_t *dp;
dsl_dataset_t *snapds;
guid_map_entry_t *gmep;
int err;
ASSERT(guid_map != NULL);
- rw_enter(&dp->dp_config_rwlock, RW_READER);
- err = dsl_dataset_hold_obj(dp, snapobj, guid_map, &snapds);
+ err = dsl_pool_hold(name, FTAG, &dp);
+ if (err != 0)
+ return (err);
+ gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
+ err = dsl_dataset_hold_obj(dp, snapobj, gmep, &snapds);
if (err == 0) {
- gmep = kmem_alloc(sizeof (guid_map_entry_t), KM_SLEEP);
- gmep->guid = snapds->ds_phys->ds_guid;
+ gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
gmep->gme_ds = snapds;
avl_add(guid_map, gmep);
+ dsl_dataset_long_hold(snapds, gmep);
+ } else {
+ kmem_free(gmep, sizeof (*gmep));
}
- rw_exit(&dp->dp_config_rwlock);
+ dsl_pool_rele(dp, FTAG);
return (err);
}
+static int dmu_recv_end_modified_blocks = 3;
+
static int
dmu_recv_existing_end(dmu_recv_cookie_t *drc)
{
- struct recvendsyncarg resa;
- dsl_dataset_t *ds = drc->drc_logical_ds;
- int err, myerr;
+ int error;
+
+#ifdef _KERNEL
+ char *name;
/*
- * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
- * expects it to have a ds_user_ptr (and zil), but clone_swap()
- * can close it.
+ * We will be destroying the ds; make sure its origin is unmounted if
+ * necessary.
*/
- txg_wait_synced(ds->ds_dir->dd_pool, 0);
-
- if (dsl_dataset_tryown(ds, FALSE, dmu_recv_tag)) {
- err = dsl_dataset_clone_swap(drc->drc_real_ds, ds,
- drc->drc_force);
- if (err)
- goto out;
- } else {
- mutex_exit(&ds->ds_recvlock);
- dsl_dataset_rele(ds, dmu_recv_tag);
- (void) dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag,
- B_FALSE);
- return (EBUSY);
- }
-
- resa.creation_time = drc->drc_drrb->drr_creation_time;
- resa.toguid = drc->drc_drrb->drr_toguid;
- resa.tosnap = drc->drc_tosnap;
+ name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
+ dsl_dataset_name(drc->drc_ds, name);
+ zfs_destroy_unmount_origin(name);
+ kmem_free(name, MAXNAMELEN);
+#endif
- err = dsl_sync_task_do(ds->ds_dir->dd_pool,
- recv_end_check, recv_end_sync, ds, &resa, 3);
- if (err) {
- /* swap back */
- (void) dsl_dataset_clone_swap(drc->drc_real_ds, ds, B_TRUE);
- }
+ error = dsl_sync_task(drc->drc_tofs,
+ dmu_recv_end_check, dmu_recv_end_sync, drc,
+ dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
-out:
- mutex_exit(&ds->ds_recvlock);
- if (err == 0 && drc->drc_guid_to_ds_map != NULL)
- (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
- dsl_dataset_disown(ds, dmu_recv_tag);
- myerr = dsl_dataset_destroy(drc->drc_real_ds, dmu_recv_tag, B_FALSE);
- ASSERT3U(myerr, ==, 0);
- return (err);
+ if (error != 0)
+ dmu_recv_cleanup_ds(drc);
+ return (error);
}
static int
dmu_recv_new_end(dmu_recv_cookie_t *drc)
{
- struct recvendsyncarg resa;
- dsl_dataset_t *ds = drc->drc_logical_ds;
- int err;
-
- /*
- * XXX hack; seems the ds is still dirty and dsl_pool_zil_clean()
- * expects it to have a ds_user_ptr (and zil), but clone_swap()
- * can close it.
- */
- txg_wait_synced(ds->ds_dir->dd_pool, 0);
-
- resa.creation_time = drc->drc_drrb->drr_creation_time;
- resa.toguid = drc->drc_drrb->drr_toguid;
- resa.tosnap = drc->drc_tosnap;
-
- err = dsl_sync_task_do(ds->ds_dir->dd_pool,
- recv_end_check, recv_end_sync, ds, &resa, 3);
- if (err) {
- /* clean up the fs we just recv'd into */
- (void) dsl_dataset_destroy(ds, dmu_recv_tag, B_FALSE);
- } else {
- if (drc->drc_guid_to_ds_map != NULL)
- (void) add_ds_to_guidmap(drc->drc_guid_to_ds_map, ds);
- /* release the hold from dmu_recv_begin */
- dsl_dataset_disown(ds, dmu_recv_tag);
+ int error;
+
+ error = dsl_sync_task(drc->drc_tofs,
+ dmu_recv_end_check, dmu_recv_end_sync, drc,
+ dmu_recv_end_modified_blocks, ZFS_SPACE_CHECK_NORMAL);
+
+ if (error != 0) {
+ dmu_recv_cleanup_ds(drc);
+ } else if (drc->drc_guid_to_ds_map != NULL) {
+ (void) add_ds_to_guidmap(drc->drc_tofs,
+ drc->drc_guid_to_ds_map,
+ drc->drc_newsnapobj);
}
- return (err);
+ return (error);
}
int
-dmu_recv_end(dmu_recv_cookie_t *drc)
+dmu_recv_end(dmu_recv_cookie_t *drc, void *owner)
{
- if (drc->drc_logical_ds != drc->drc_real_ds)
- return (dmu_recv_existing_end(drc));
- else
+ drc->drc_owner = owner;
+
+ if (drc->drc_newfs)
return (dmu_recv_new_end(drc));
+ else
+ return (dmu_recv_existing_end(drc));
}
+
+/*
+ * Return TRUE if this objset is currently being received into.
+ */
+boolean_t
+dmu_objset_is_receiving(objset_t *os)
+{
+ return (os->os_dsl_dataset != NULL &&
+ os->os_dsl_dataset->ds_owner == dmu_recv_tag);
+}
+
+#if defined(_KERNEL)
+module_param(zfs_send_corrupt_data, int, 0644);
+MODULE_PARM_DESC(zfs_send_corrupt_data, "Allow sending corrupt data");
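+
+/* Sketch: export the new queue-length tunables using the same pattern. */
+module_param(zfs_send_queue_length, int, 0644);
+MODULE_PARM_DESC(zfs_send_queue_length, "Maximum send queue length");
+
+module_param(zfs_recv_queue_length, int, 0644);
+MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");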
+#endif