* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
- * or http://www.opensolaris.org/os/licensing.
+ * or https://opensource.org/licenses/CDDL-1.0.
* See the License for the specific language governing permissions
* and limitations under the License.
*
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
+ * Copyright (c) 2019, Klara Inc.
+ * Copyright (c) 2019, Allan Jude
+ * Copyright (c) 2019 Datto Inc.
+ * Copyright (c) 2022 Axcient.
*/
+#include <sys/arc.h>
+#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_send.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
-#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
+#include <sys/zfs_file.h>
+
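+/*
+ * Tunables: the maximum number of bytes queued by the receive reader thread,
+ * the queue's fill fraction (controls when waiting threads are woken), the
+ * maximum number of bytes batched into a single write transaction, and
+ * whether a corrective ("healing") receive continues on a best-effort basis
+ * when a block cannot be healed or the snapshot GUIDs do not match.
+ */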
+static uint_t zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
+static uint_t zfs_recv_queue_ff = 20;
+static uint_t zfs_recv_write_batch_size = 1024 * 1024;
+static int zfs_recv_best_effort_corrective = 0;
-int zfs_recv_queue_length = SPA_MAXBLOCKSIZE;
-int zfs_recv_queue_ff = 20;
+static const void *const dmu_recv_tag = "dmu_recv_tag";
+const char *const recv_clone_name = "%recv";
-static char *dmu_recv_tag = "dmu_recv_tag";
-const char *recv_clone_name = "%recv";
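+/*
+ * Tracks whether a txg sync is needed before receiving the objects in the
+ * current DRR_OBJECT_RANGE: ORNS_MAYBE when the range begins, promoted to
+ * ORNS_YES once a DRR_FREEOBJECTS record actually frees an object, and reset
+ * to ORNS_NO after the first object of the range has been processed.
+ */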
+typedef enum {
+ ORNS_NO,
+ ORNS_YES,
+ ORNS_MAYBE
+} or_need_sync_t;
static int receive_read_payload_and_next_header(dmu_recv_cookie_t *ra, int len,
void *buf);
dmu_replay_record_t header;
void *payload; /* Pointer to a buffer containing the payload */
/*
- * If the record is a write, pointer to the arc_buf_t containing the
+ * If the record is a WRITE or SPILL, pointer to the abd containing the
* payload.
*/
- arc_buf_t *arc_buf;
+ abd_t *abd;
int payload_size;
uint64_t bytes_read; /* bytes read from stream when record created */
boolean_t eos_marker; /* Marks the end of the stream */
bqueue_t q;
/*
- * These three args are used to signal to the main thread that we're
- * done.
+ * These three members are used to signal to the main thread when
+ * we're done.
*/
kmutex_t mutex;
kcondvar_t cv;
boolean_t done;
int err;
- /* A map from guid to dataset to help handle dedup'd streams. */
- avl_tree_t *guid_to_ds_map;
+ const char *tofs;
+ boolean_t heal;
boolean_t resumable;
boolean_t raw; /* DMU_BACKUP_FEATURE_RAW set */
boolean_t spill; /* DRR_FLAG_SPILL_BLOCK set */
+ boolean_t full; /* this is a full send stream */
uint64_t last_object;
uint64_t last_offset;
uint64_t max_object; /* highest object ID referenced in stream */
uint64_t bytes_read; /* bytes read when current record created */
+ list_t write_batch;
+
/* Encryption parameters for the last received DRR_OBJECT_RANGE */
boolean_t or_crypt_params_present;
uint64_t or_firstobj;
uint8_t or_iv[ZIO_DATA_IV_LEN];
uint8_t or_mac[ZIO_DATA_MAC_LEN];
boolean_t or_byteorder;
-};
+ zio_t *heal_pio;
-typedef struct guid_map_entry {
- uint64_t guid;
- boolean_t raw;
- dsl_dataset_t *gme_ds;
- avl_node_t avlnode;
-} guid_map_entry_t;
+ /* Keep track of DRR_FREEOBJECTS right after DRR_OBJECT_RANGE */
+ or_need_sync_t or_need_sync;
+};
typedef struct dmu_recv_begin_arg {
const char *drba_origin;
dmu_recv_cookie_t *drba_cookie;
cred_t *drba_cred;
+ proc_t *drba_proc;
dsl_crypto_params_t *drba_dcp;
} dmu_recv_begin_arg_t;
DO64(drr_write.drr_key.ddk_prop);
DO64(drr_write.drr_compressed_size);
break;
- case DRR_WRITE_BYREF:
- DO64(drr_write_byref.drr_object);
- DO64(drr_write_byref.drr_offset);
- DO64(drr_write_byref.drr_length);
- DO64(drr_write_byref.drr_toguid);
- DO64(drr_write_byref.drr_refguid);
- DO64(drr_write_byref.drr_refobject);
- DO64(drr_write_byref.drr_refoffset);
- ZIO_CHECKSUM_BSWAP(&drr->drr_u.drr_write_byref.
- drr_key.ddk_cksum);
- DO64(drr_write_byref.drr_key.ddk_prop);
- break;
case DRR_WRITE_EMBEDDED:
DO64(drr_write_embedded.drr_object);
DO64(drr_write_embedded.drr_offset);
return (ret);
}
+/*
+ * If we previously received a stream with --large-block, we don't support
+ * receiving an incremental on top of it without --large-block. This avoids
+ * forcing a read-modify-write or trying to re-aggregate a string of WRITE
+ * records.
+ */
+static int
+recv_check_large_blocks(dsl_dataset_t *ds, uint64_t featureflags)
+{
+ if (dsl_dataset_feature_is_active(ds, SPA_FEATURE_LARGE_BLOCKS) &&
+ !(featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS))
+ return (SET_ERROR(ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH));
+ return (0);
+}
+
static int
recv_begin_check_existing_impl(dmu_recv_begin_arg_t *drba, dsl_dataset_t *ds,
uint64_t fromguid, uint64_t featureflags)
{
- uint64_t val;
+ uint64_t obj;
uint64_t children;
int error;
+ dsl_dataset_t *snap;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
boolean_t raw = (featureflags & DMU_BACKUP_FEATURE_RAW) != 0;
/* Temporary clone name must not exist. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, recv_clone_name,
- 8, 1, &val);
+ 8, 1, &obj);
if (error != ENOENT)
return (error == 0 ? SET_ERROR(EBUSY) : error);
if (dsl_dataset_has_resume_receive_state(ds))
return (SET_ERROR(EBUSY));
- /* New snapshot name must not exist. */
+ /* New snapshot name must not exist if we're not healing it. */
error = zap_lookup(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj,
- drba->drba_cookie->drc_tosnap, 8, 1, &val);
- if (error != ENOENT)
+ drba->drba_cookie->drc_tosnap, 8, 1, &obj);
+ if (drba->drba_cookie->drc_heal) {
+ if (error != 0)
+ return (error);
+ } else if (error != ENOENT) {
return (error == 0 ? SET_ERROR(EEXIST) : error);
+ }
/* Must not have children if receiving a ZVOL. */
error = zap_count(dp->dp_meta_objset,
* against that limit.
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1, ZFS_PROP_SNAPSHOT_LIMIT,
- NULL, drba->drba_cred);
+ NULL, drba->drba_cred, drba->drba_proc);
if (error != 0)
return (error);
- if (fromguid != 0) {
- dsl_dataset_t *snap;
+ if (drba->drba_cookie->drc_heal) {
+ /* Encryption is incompatible with embedded data. */
+ if (encrypted && embed)
+ return (SET_ERROR(EINVAL));
+
+ /* Healing is not supported when in 'force' mode. */
+ if (drba->drba_cookie->drc_force)
+ return (SET_ERROR(EINVAL));
+
+ /* Must have keys loaded if doing encrypted non-raw recv. */
+ if (encrypted && !raw) {
+ if (spa_keystore_lookup_key(dp->dp_spa, ds->ds_object,
+ NULL, NULL) != 0)
+ return (SET_ERROR(EACCES));
+ }
+
+ error = dsl_dataset_hold_obj(dp, obj, FTAG, &snap);
+ if (error != 0)
+ return (error);
+
+ /*
+		 * When not doing best-effort corrective recv, healing can only
+		 * be done if the send stream is for the same snapshot as the
+		 * one we are trying to heal.
+ */
+ if (zfs_recv_best_effort_corrective == 0 &&
+ drba->drba_cookie->drc_drrb->drr_toguid !=
+ dsl_dataset_phys(snap)->ds_guid) {
+ dsl_dataset_rele(snap, FTAG);
+ return (SET_ERROR(ENOTSUP));
+ }
+ dsl_dataset_rele(snap, FTAG);
+ } else if (fromguid != 0) {
+ /* Sanity check the incremental recv */
uint64_t obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
/* Can't perform a raw receive on top of a non-raw receive */
return (SET_ERROR(EINVAL));
}
+ error = recv_check_large_blocks(snap, featureflags);
+ if (error != 0) {
+ dsl_dataset_rele(snap, FTAG);
+ return (error);
+ }
+
dsl_dataset_rele(snap, FTAG);
} else {
- /* if full, then must be forced */
+ /* If full and not healing then must be forced. */
if (!drba->drba_cookie->drc_force)
return (SET_ERROR(EEXIST));
}
return (0);
-
}
/*
return (SET_ERROR(ENOTSUP));
/*
- * LZ4 compressed, embedded, mooched, large blocks, and large_dnodes
- * in the stream can only be used if those pool features are enabled
- * because we don't attempt to decompress / un-embed / un-mooch /
- * split up the blocks / dnodes during the receive process.
+ * LZ4 compressed, ZSTD compressed, embedded, mooched, large blocks,
+ * and large_dnodes in the stream can only be used if those pool
+ * features are enabled because we don't attempt to decompress /
+ * un-embed / un-mooch / split up the blocks / dnodes during the
+ * receive process.
*/
if ((featureflags & DMU_BACKUP_FEATURE_LZ4) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_LZ4_COMPRESS))
return (SET_ERROR(ENOTSUP));
+ if ((featureflags & DMU_BACKUP_FEATURE_ZSTD) &&
+ !spa_feature_is_enabled(spa, SPA_FEATURE_ZSTD_COMPRESS))
+ return (SET_ERROR(ENOTSUP));
if ((featureflags & DMU_BACKUP_FEATURE_EMBED_DATA) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA))
return (SET_ERROR(ENOTSUP));
struct drr_begin *drrb = drba->drba_cookie->drc_drrb;
uint64_t fromguid = drrb->drr_fromguid;
int flags = drrb->drr_flags;
- ds_hold_flags_t dsflags = 0;
+ ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
if (!(flags & DRR_FLAG_SPILL_BLOCK))
return (SET_ERROR(ZFS_ERR_SPILL_BLOCK_FLAG_MISSING));
} else {
- dsflags |= DS_HOLD_FLAG_DECRYPT;
+ /*
+ * We support unencrypted datasets below encrypted ones now,
+ * so add the DS_HOLD_FLAG_DECRYPT flag only if we are dealing
+ * with a dataset we may encrypt.
+ */
+ if (drba->drba_dcp == NULL ||
+ drba->drba_dcp->cp_crypt != ZIO_CRYPT_OFF) {
+ dsflags |= DS_HOLD_FLAG_DECRYPT;
+ }
}
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
char buf[ZFS_MAX_DATASET_NAME_LEN];
objset_t *os;
+ /* healing recv must be done "into" an existing snapshot */
+ if (drba->drba_cookie->drc_heal == B_TRUE)
+ return (SET_ERROR(ENOTSUP));
+
/*
* If it's a non-clone incremental, we are missing the
* target fs, so fail the recv.
* filesystems and increment those counts during begin_sync).
*/
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
- ZFS_PROP_FILESYSTEM_LIMIT, NULL, drba->drba_cred);
+ ZFS_PROP_FILESYSTEM_LIMIT, NULL,
+ drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(ds->ds_dir, 1,
- ZFS_PROP_SNAPSHOT_LIMIT, NULL, drba->drba_cred);
+ ZFS_PROP_SNAPSHOT_LIMIT, NULL,
+ drba->drba_cred, drba->drba_proc);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
}
+ error = recv_check_large_blocks(ds, featureflags);
+ if (error != 0) {
+ dsl_dataset_rele_flags(origin, dsflags, FTAG);
+ dsl_dataset_rele_flags(ds, dsflags, FTAG);
+ return (error);
+ }
+
dsl_dataset_rele_flags(origin, dsflags, FTAG);
}
dsl_dataset_t *ds, *newds;
objset_t *os;
uint64_t dsobj;
- ds_hold_flags_t dsflags = 0;
+ ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
int error;
uint64_t crflags = 0;
dsl_crypto_params_t dummy_dcp = { 0 };
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error == 0) {
- /* create temporary clone */
+ /* Create temporary clone unless we're doing corrective recv */
dsl_dataset_t *snap = NULL;
if (drba->drba_cookie->drc_fromsnapobj != 0) {
drba->drba_cookie->drc_fromsnapobj, FTAG, &snap));
ASSERT3P(dcp, ==, NULL);
}
- dsobj = dsl_dataset_create_sync(ds->ds_dir, recv_clone_name,
- snap, crflags, drba->drba_cred, dcp, tx);
+ if (drc->drc_heal) {
+ /* When healing we want to use the provided snapshot */
+ VERIFY0(dsl_dataset_snap_lookup(ds, drc->drc_tosnap,
+ &dsobj));
+ } else {
+ dsobj = dsl_dataset_create_sync(ds->ds_dir,
+ recv_clone_name, snap, crflags, drba->drba_cred,
+ dcp, tx);
+ }
if (drba->drba_cookie->drc_fromsnapobj != 0)
dsl_dataset_rele(snap, FTAG);
dsl_dataset_rele_flags(ds, dsflags, FTAG);
drba->drba_cookie->drc_raw = B_TRUE;
}
-
if (featureflags & DMU_BACKUP_FEATURE_REDACTED) {
uint64_t *redact_snaps;
uint_t numredactsnaps;
*/
rrw_enter(&newds->ds_bp_rwlock, RW_READER, FTAG);
if (BP_IS_HOLE(dsl_dataset_get_blkptr(newds)) &&
- (featureflags & DMU_BACKUP_FEATURE_RAW) == 0) {
+ (featureflags & DMU_BACKUP_FEATURE_RAW) == 0 &&
+ !drc->drc_heal) {
(void) dmu_objset_create_impl(dp->dp_spa,
newds, dsl_dataset_get_blkptr(newds), drrb->drr_type, tx);
}
rrw_exit(&newds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = newds;
+ drba->drba_cookie->drc_os = os;
- spa_history_log_internal_ds(newds, "receive", tx, "");
+ spa_history_log_internal_ds(newds, "receive", tx, " ");
}
static int
dsl_pool_t *dp = dmu_tx_pool(tx);
struct drr_begin *drrb = drc->drc_drrb;
int error;
- ds_hold_flags_t dsflags = 0;
+ ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
dsl_dataset_t *ds;
const char *tofs = drc->drc_tofs;
dsflags |= DS_HOLD_FLAG_DECRYPT;
}
+ boolean_t recvexist = B_TRUE;
if (dsl_dataset_hold_flags(dp, recvname, dsflags, FTAG, &ds) != 0) {
/* %recv does not exist; continue in tofs */
+ recvexist = B_FALSE;
error = dsl_dataset_hold_flags(dp, tofs, dsflags, FTAG, &ds);
if (error != 0)
return (error);
}
+ /*
+	 * Resuming a full/newfs recv on an existing dataset must be done
+	 * with the force flag.
+ */
+ if (recvexist && drrb->drr_fromguid == 0 && !drc->drc_force) {
+ dsl_dataset_rele_flags(ds, dsflags, FTAG);
+ return (SET_ERROR(ZFS_ERR_RESUME_EXISTS));
+ }
+
/* check that ds is marked inconsistent */
if (!DS_IS_INCONSISTENT(ds)) {
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (SET_ERROR(EINVAL));
}
+ if (ds->ds_prev != NULL && drrb->drr_fromguid != 0)
+ drc->drc_fromsnapobj = ds->ds_prev->ds_object;
+
/*
* If we're resuming, and the send is redacted, then the original send
* must have been redacted, and must have been redacted with respect to
}
}
}
+
+ error = recv_check_large_blocks(ds, drc->drc_featureflags);
+ if (error != 0) {
+ dsl_dataset_rele_flags(ds, dsflags, FTAG);
+ return (error);
+ }
+
dsl_dataset_rele_flags(ds, dsflags, FTAG);
return (0);
}
const char *tofs = drba->drba_cookie->drc_tofs;
uint64_t featureflags = drba->drba_cookie->drc_featureflags;
dsl_dataset_t *ds;
- ds_hold_flags_t dsflags = 0;
+ ds_hold_flags_t dsflags = DS_HOLD_FLAG_NONE;
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
rrw_exit(&ds->ds_bp_rwlock, FTAG);
drba->drba_cookie->drc_ds = ds;
+ VERIFY0(dmu_objset_from_ds(ds, &drba->drba_cookie->drc_os));
+ drba->drba_cookie->drc_should_save = B_TRUE;
- spa_history_log_internal_ds(ds, "resume receive", tx, "");
+ spa_history_log_internal_ds(ds, "resume receive", tx, " ");
}
/*
* succeeds; otherwise we will leak the holds on the datasets.
*/
int
-dmu_recv_begin(char *tofs, char *tosnap, dmu_replay_record_t *drr_begin,
- boolean_t force, boolean_t resumable, nvlist_t *localprops,
- nvlist_t *hidden_args, char *origin, dmu_recv_cookie_t *drc, vnode_t *vp,
+dmu_recv_begin(const char *tofs, const char *tosnap,
+ dmu_replay_record_t *drr_begin, boolean_t force, boolean_t heal,
+ boolean_t resumable, nvlist_t *localprops, nvlist_t *hidden_args,
+ const char *origin, dmu_recv_cookie_t *drc, zfs_file_t *fp,
offset_t *voffp)
{
dmu_recv_begin_arg_t drba = { 0 };
- int err;
+ int err = 0;
- bzero(drc, sizeof (dmu_recv_cookie_t));
+ memset(drc, 0, sizeof (dmu_recv_cookie_t));
drc->drc_drr_begin = drr_begin;
drc->drc_drrb = &drr_begin->drr_u.drr_begin;
drc->drc_tosnap = tosnap;
drc->drc_tofs = tofs;
drc->drc_force = force;
+ drc->drc_heal = heal;
drc->drc_resumable = resumable;
drc->drc_cred = CRED();
+ drc->drc_proc = curproc;
drc->drc_clone = (origin != NULL);
if (drc->drc_drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
return (SET_ERROR(EINVAL));
}
- drc->drc_vp = vp;
+ drc->drc_fp = fp;
drc->drc_voff = *voffp;
drc->drc_featureflags =
DMU_GET_FEATUREFLAGS(drc->drc_drrb->drr_versioninfo);
uint32_t payloadlen = drc->drc_drr_begin->drr_payloadlen;
- void *payload = NULL;
- if (payloadlen != 0)
- payload = kmem_alloc(payloadlen, KM_SLEEP);
- err = receive_read_payload_and_next_header(drc, payloadlen,
- payload);
- if (err != 0) {
- kmem_free(payload, payloadlen);
- return (err);
- }
+ /*
+ * Since OpenZFS 2.0.0, we have enforced a 64MB limit in userspace
+ * configurable via ZFS_SENDRECV_MAX_NVLIST. We enforce 256MB as a hard
+ * upper limit. Systems with less than 1GB of RAM will see a lower
+ * limit from `arc_all_memory() / 4`.
+ */
+ if (payloadlen > (MIN((1U << 28), arc_all_memory() / 4)))
+ return (E2BIG);
+
if (payloadlen != 0) {
+ void *payload = vmem_alloc(payloadlen, KM_SLEEP);
+ /*
+ * For compatibility with recursive send streams, we don't do
+ * this here if the stream could be part of a package. Instead,
+ * we'll do it in dmu_recv_stream. If we pull the next header
+ * too early, and it's the END record, we break the `recv_skip`
+ * logic.
+ */
+
+ err = receive_read_payload_and_next_header(drc, payloadlen,
+ payload);
+ if (err != 0) {
+ vmem_free(payload, payloadlen);
+ return (err);
+ }
err = nvlist_unpack(payload, payloadlen, &drc->drc_begin_nvl,
KM_SLEEP);
- kmem_free(payload, payloadlen);
+ vmem_free(payload, payloadlen);
if (err != 0) {
kmem_free(drc->drc_next_rrd,
sizeof (*drc->drc_next_rrd));
drba.drba_origin = origin;
drba.drba_cookie = drc;
drba.drba_cred = CRED();
+ drba.drba_proc = curproc;
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RESUMING) {
err = dsl_sync_task(tofs,
dmu_recv_resume_begin_check, dmu_recv_resume_begin_sync,
&drba, 5, ZFS_SPACE_CHECK_NORMAL);
} else {
-
/*
* For non-raw, non-incremental, non-resuming receives the
* user can specify encryption parameters on the command line
return (err);
}
-static int
-guid_compare(const void *arg1, const void *arg2)
-{
- const guid_map_entry_t *gmep1 = (const guid_map_entry_t *)arg1;
- const guid_map_entry_t *gmep2 = (const guid_map_entry_t *)arg2;
+/*
+ * Holds data needed for the corrective recv callback
+ */
+typedef struct cr_cb_data {
+ uint64_t size;
+ zbookmark_phys_t zb;
+ spa_t *spa;
+} cr_cb_data_t;
- return (AVL_CMP(gmep1->guid, gmep2->guid));
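+/*
+ * Completion callback for the verification read issued after a corrective
+ * write: on success the block's entry is removed from the persistent error
+ * log, and the callback data and read buffer are freed.
+ */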
+static void
+corrective_read_done(zio_t *zio)
+{
+ cr_cb_data_t *data = zio->io_private;
+ /* Corruption corrected; update error log if needed */
+ if (zio->io_error == 0)
+ spa_remove_error(data->spa, &data->zb, &zio->io_bp->blk_birth);
+ kmem_free(data, sizeof (cr_cb_data_t));
+ abd_free(zio->io_abd);
}
-static void
-free_guid_map_onexit(void *arg)
+/*
+ * zio_rewrite the data pointed to by bp with the data from the rrd's abd.
+ */
+static int
+do_corrective_recv(struct receive_writer_arg *rwa, struct drr_write *drrw,
+ struct receive_record_arg *rrd, blkptr_t *bp)
{
- avl_tree_t *ca = arg;
- void *cookie = NULL;
- guid_map_entry_t *gmep;
+ int err;
+ zio_t *io;
+ zbookmark_phys_t zb;
+ dnode_t *dn;
+ abd_t *abd = rrd->abd;
+ zio_cksum_t bp_cksum = bp->blk_cksum;
+ zio_flag_t flags = ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_RETRY |
+ ZIO_FLAG_CANFAIL;
+
+ if (rwa->raw)
+ flags |= ZIO_FLAG_RAW;
+
+ err = dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn);
+ if (err != 0)
+ return (err);
+ SET_BOOKMARK(&zb, dmu_objset_id(rwa->os), drrw->drr_object, 0,
+ dbuf_whichblock(dn, 0, drrw->drr_offset));
+ dnode_rele(dn, FTAG);
- while ((gmep = avl_destroy_nodes(ca, &cookie)) != NULL) {
- ds_hold_flags_t dsflags = DS_HOLD_FLAG_DECRYPT;
+ if (!rwa->raw && DRR_WRITE_COMPRESSED(drrw)) {
+ /* Decompress the stream data */
+ abd_t *dabd = abd_alloc_linear(
+ drrw->drr_logical_size, B_FALSE);
+ err = zio_decompress_data(drrw->drr_compressiontype,
+ abd, abd_to_buf(dabd), abd_get_size(abd),
+ abd_get_size(dabd), NULL);
- if (gmep->raw) {
- gmep->gme_ds->ds_objset->os_raw_receive = B_FALSE;
- dsflags &= ~DS_HOLD_FLAG_DECRYPT;
+ if (err != 0) {
+ abd_free(dabd);
+ return (err);
}
+ /* Swap in the newly decompressed data into the abd */
+ abd_free(abd);
+ abd = dabd;
+ }
+
+ if (!rwa->raw && BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
+ /* Recompress the data */
+ abd_t *cabd = abd_alloc_linear(BP_GET_PSIZE(bp),
+ B_FALSE);
+ void *buf = abd_to_buf(cabd);
+ uint64_t csize = zio_compress_data(BP_GET_COMPRESS(bp),
+ abd, &buf, abd_get_size(abd),
+ rwa->os->os_complevel);
+ abd_zero_off(cabd, csize, BP_GET_PSIZE(bp) - csize);
+ /* Swap in newly compressed data into the abd */
+ abd_free(abd);
+ abd = cabd;
+ flags |= ZIO_FLAG_RAW_COMPRESS;
+ }
+
+ /*
+ * The stream is not encrypted but the data on-disk is.
+ * We need to re-encrypt the buf using the same
+	 * encryption type, salt, iv, and mac that were used to encrypt
+	 * the block previously.
+ */
+ if (!rwa->raw && BP_USES_CRYPT(bp)) {
+ dsl_dataset_t *ds;
+ dsl_crypto_key_t *dck = NULL;
+ uint8_t salt[ZIO_DATA_SALT_LEN];
+ uint8_t iv[ZIO_DATA_IV_LEN];
+ uint8_t mac[ZIO_DATA_MAC_LEN];
+ boolean_t no_crypt = B_FALSE;
+ dsl_pool_t *dp = dmu_objset_pool(rwa->os);
+ abd_t *eabd = abd_alloc_linear(BP_GET_PSIZE(bp), B_FALSE);
+
+ zio_crypt_decode_params_bp(bp, salt, iv);
+ zio_crypt_decode_mac_bp(bp, mac);
+
+ dsl_pool_config_enter(dp, FTAG);
+ err = dsl_dataset_hold_flags(dp, rwa->tofs,
+ DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
+ if (err != 0) {
+ dsl_pool_config_exit(dp, FTAG);
+ abd_free(eabd);
+ return (SET_ERROR(EACCES));
+ }
+
+ /* Look up the key from the spa's keystore */
+ err = spa_keystore_lookup_key(rwa->os->os_spa,
+ zb.zb_objset, FTAG, &dck);
+ if (err != 0) {
+ dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT,
+ FTAG);
+ dsl_pool_config_exit(dp, FTAG);
+ abd_free(eabd);
+ return (SET_ERROR(EACCES));
+ }
+
+ err = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
+ BP_GET_TYPE(bp), BP_SHOULD_BYTESWAP(bp), salt, iv,
+ mac, abd_get_size(abd), abd, eabd, &no_crypt);
+
+ spa_keystore_dsl_key_rele(rwa->os->os_spa, dck, FTAG);
+ dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
+ dsl_pool_config_exit(dp, FTAG);
+
+ ASSERT0(no_crypt);
+ if (err != 0) {
+ abd_free(eabd);
+ return (err);
+ }
+ /* Swap in the newly encrypted data into the abd */
+ abd_free(abd);
+ abd = eabd;
+
+ /*
+ * We want to prevent zio_rewrite() from trying to
+ * encrypt the data again
+ */
+ flags |= ZIO_FLAG_RAW_ENCRYPT;
+ }
+ rrd->abd = abd;
+
+ io = zio_rewrite(NULL, rwa->os->os_spa, bp->blk_birth, bp, abd,
+ BP_GET_PSIZE(bp), NULL, NULL, ZIO_PRIORITY_SYNC_WRITE, flags, &zb);
- dsl_dataset_disown(gmep->gme_ds, dsflags, gmep);
- kmem_free(gmep, sizeof (guid_map_entry_t));
+ ASSERT(abd_get_size(abd) == BP_GET_LSIZE(bp) ||
+ abd_get_size(abd) == BP_GET_PSIZE(bp));
+
+ /* compute new bp checksum value and make sure it matches the old one */
+ zio_checksum_compute(io, BP_GET_CHECKSUM(bp), abd, abd_get_size(abd));
+ if (!ZIO_CHECKSUM_EQUAL(bp_cksum, io->io_bp->blk_cksum)) {
+ zio_destroy(io);
+ if (zfs_recv_best_effort_corrective != 0)
+ return (0);
+ return (SET_ERROR(ECKSUM));
}
- avl_destroy(ca);
- kmem_free(ca, sizeof (avl_tree_t));
+
+ /* Correct the corruption in place */
+ err = zio_wait(io);
+ if (err == 0) {
+ cr_cb_data_t *cb_data =
+ kmem_alloc(sizeof (cr_cb_data_t), KM_SLEEP);
+ cb_data->spa = rwa->os->os_spa;
+ cb_data->size = drrw->drr_logical_size;
+ cb_data->zb = zb;
+ /* Test if healing worked by re-reading the bp */
+ err = zio_wait(zio_read(rwa->heal_pio, rwa->os->os_spa, bp,
+ abd_alloc_for_io(drrw->drr_logical_size, B_FALSE),
+ drrw->drr_logical_size, corrective_read_done,
+ cb_data, ZIO_PRIORITY_ASYNC_READ, flags, NULL));
+ }
+ if (err != 0 && zfs_recv_best_effort_corrective != 0)
+ err = 0;
+
+ return (err);
}
static int
(drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) != 0);
while (done < len) {
- ssize_t resid;
-
- drc->drc_err = vn_rdwr(UIO_READ, drc->drc_vp,
- (char *)buf + done, len - done,
- drc->drc_voff, UIO_SYSSPACE, FAPPEND,
- RLIM64_INFINITY, CRED(), &resid);
-
- if (resid == len - done) {
+ ssize_t resid = len - done;
+ zfs_file_t *fp = drc->drc_fp;
+ int err = zfs_file_read(fp, (char *)buf + done,
+ len - done, &resid);
+ if (err == 0 && resid == len - done) {
/*
- * Note: ECKSUM indicates that the receive
- * was interrupted and can potentially be resumed.
+ * Note: ECKSUM or ZFS_ERR_STREAM_TRUNCATED indicates
+ * that the receive was interrupted and can
+ * potentially be resumed.
*/
- drc->drc_err = SET_ERROR(ECKSUM);
+ err = SET_ERROR(ZFS_ERR_STREAM_TRUNCATED);
}
drc->drc_voff += len - done - resid;
done = len - resid;
- if (drc->drc_err != 0)
- return (drc->drc_err);
+ if (err != 0)
+ return (err);
}
drc->drc_bytes_read += len;
rwa->os->os_dsl_dataset->ds_resume_bytes[txgoff] = rwa->bytes_read;
}
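+/*
+ * Compare the ZPL generation number stored in the existing object's bonus
+ * buffer with the generation carried by the incoming bonus data, so the
+ * caller can tell whether the object in the send stream is the same logical
+ * file as the one already on disk.
+ */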
+static int
+receive_object_is_same_generation(objset_t *os, uint64_t object,
+ dmu_object_type_t old_bonus_type, dmu_object_type_t new_bonus_type,
+ const void *new_bonus, boolean_t *samegenp)
+{
+ zfs_file_info_t zoi;
+ int err;
+
+ dmu_buf_t *old_bonus_dbuf;
+ err = dmu_bonus_hold(os, object, FTAG, &old_bonus_dbuf);
+ if (err != 0)
+ return (err);
+ err = dmu_get_file_info(os, old_bonus_type, old_bonus_dbuf->db_data,
+ &zoi);
+ dmu_buf_rele(old_bonus_dbuf, FTAG);
+ if (err != 0)
+ return (err);
+ uint64_t old_gen = zoi.zfi_generation;
+
+ err = dmu_get_file_info(os, new_bonus_type, new_bonus, &zoi);
+ if (err != 0)
+ return (err);
+ uint64_t new_gen = zoi.zfi_generation;
+
+ *samegenp = (old_gen == new_gen);
+ return (0);
+}
+
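+/*
+ * Decide how to receive a DRR_OBJECT record for an object that already
+ * exists: whether its contents must be freed, whether it must be freed and
+ * reallocated entirely (raw receives with a changed indirect structure or
+ * dnode slot count), and which block size to keep. On success,
+ * *object_to_hold and *new_blksz describe the object the caller should
+ * claim or reclaim.
+ */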
+static int
+receive_handle_existing_object(const struct receive_writer_arg *rwa,
+ const struct drr_object *drro, const dmu_object_info_t *doi,
+ const void *bonus_data,
+ uint64_t *object_to_hold, uint32_t *new_blksz)
+{
+ uint32_t indblksz = drro->drr_indblkshift ?
+ 1ULL << drro->drr_indblkshift : 0;
+ int nblkptr = deduce_nblkptr(drro->drr_bonustype,
+ drro->drr_bonuslen);
+ uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+ drro->drr_dn_slots : DNODE_MIN_SLOTS;
+ boolean_t do_free_range = B_FALSE;
+ int err;
+
+ *object_to_hold = drro->drr_object;
+
+ /* nblkptr should be bounded by the bonus size and type */
+ if (rwa->raw && nblkptr != drro->drr_nblkptr)
+ return (SET_ERROR(EINVAL));
+
+ /*
+ * After the previous send stream, the sending system may
+ * have freed this object, and then happened to re-allocate
+ * this object number in a later txg. In this case, we are
+ * receiving a different logical file, and the block size may
+ * appear to be different. i.e. we may have a different
+ * block size for this object than what the send stream says.
+ * In this case we need to remove the object's contents,
+ * so that its structure can be changed and then its contents
+ * entirely replaced by subsequent WRITE records.
+ *
+ * If this is a -L (--large-block) incremental stream, and
+ * the previous stream was not -L, the block size may appear
+ * to increase. i.e. we may have a smaller block size for
+ * this object than what the send stream says. In this case
+ * we need to keep the object's contents and block size
+ * intact, so that we don't lose parts of the object's
+ * contents that are not changed by this incremental send
+ * stream.
+ *
+ * We can distinguish between the two above cases by using
+ * the ZPL's generation number (see
+ * receive_object_is_same_generation()). However, we only
+ * want to rely on the generation number when absolutely
+ * necessary, because with raw receives, the generation is
+ * encrypted. We also want to minimize dependence on the
+ * ZPL, so that other types of datasets can also be received
+	 * (e.g. ZVOLs, although note that ZVOLs currently do not
+ * reallocate their objects or change their structure).
+ * Therefore, we check a number of different cases where we
+ * know it is safe to discard the object's contents, before
+ * using the ZPL's generation number to make the above
+ * distinction.
+ */
+ if (drro->drr_blksz != doi->doi_data_block_size) {
+ if (rwa->raw) {
+ /*
+ * RAW streams always have large blocks, so
+ * we are sure that the data is not needed
+ * due to changing --large-block to be on.
+ * Which is fortunate since the bonus buffer
+ * (which contains the ZPL generation) is
+ * encrypted, and the key might not be
+ * loaded.
+ */
+ do_free_range = B_TRUE;
+ } else if (rwa->full) {
+ /*
+ * This is a full send stream, so it always
+ * replaces what we have. Even if the
+ * generation numbers happen to match, this
+ * can not actually be the same logical file.
+ * This is relevant when receiving a full
+ * send as a clone.
+ */
+ do_free_range = B_TRUE;
+ } else if (drro->drr_type !=
+ DMU_OT_PLAIN_FILE_CONTENTS ||
+ doi->doi_type != DMU_OT_PLAIN_FILE_CONTENTS) {
+ /*
+ * PLAIN_FILE_CONTENTS are the only type of
+ * objects that have ever been stored with
+ * large blocks, so we don't need the special
+ * logic below. ZAP blocks can shrink (when
+ * there's only one block), so we don't want
+ * to hit the error below about block size
+ * only increasing.
+ */
+ do_free_range = B_TRUE;
+ } else if (doi->doi_max_offset <=
+ doi->doi_data_block_size) {
+ /*
+ * There is only one block. We can free it,
+ * because its contents will be replaced by a
+ * WRITE record. This can not be the no-L ->
+ * -L case, because the no-L case would have
+ * resulted in multiple blocks. If we
+ * supported -L -> no-L, it would not be safe
+ * to free the file's contents. Fortunately,
+ * that is not allowed (see
+ * recv_check_large_blocks()).
+ */
+ do_free_range = B_TRUE;
+ } else {
+ boolean_t is_same_gen;
+ err = receive_object_is_same_generation(rwa->os,
+ drro->drr_object, doi->doi_bonus_type,
+ drro->drr_bonustype, bonus_data, &is_same_gen);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
+
+ if (is_same_gen) {
+ /*
+ * This is the same logical file, and
+ * the block size must be increasing.
+ * It could only decrease if
+ * --large-block was changed to be
+ * off, which is checked in
+ * recv_check_large_blocks().
+ */
+ if (drro->drr_blksz <=
+ doi->doi_data_block_size)
+ return (SET_ERROR(EINVAL));
+ /*
+ * We keep the existing blocksize and
+ * contents.
+ */
+ *new_blksz =
+ doi->doi_data_block_size;
+ } else {
+ do_free_range = B_TRUE;
+ }
+ }
+ }
+
+ /* nblkptr can only decrease if the object was reallocated */
+ if (nblkptr < doi->doi_nblkptr)
+ do_free_range = B_TRUE;
+
+ /* number of slots can only change on reallocation */
+ if (dn_slots != doi->doi_dnodesize >> DNODE_SHIFT)
+ do_free_range = B_TRUE;
+
+ /*
+ * For raw sends we also check a few other fields to
+ * ensure we are preserving the objset structure exactly
+ * as it was on the receive side:
+ * - A changed indirect block size
+ * - A smaller nlevels
+ */
+ if (rwa->raw) {
+ if (indblksz != doi->doi_metadata_block_size)
+ do_free_range = B_TRUE;
+ if (drro->drr_nlevels < doi->doi_indirection)
+ do_free_range = B_TRUE;
+ }
+
+ if (do_free_range) {
+ err = dmu_free_long_range(rwa->os, drro->drr_object,
+ 0, DMU_OBJECT_END);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
+ }
+
+ /*
+ * The dmu does not currently support decreasing nlevels or changing
+	 * indirect block size if there is already one, nor changing the
+	 * number of dnode slots on an object. For non-raw sends this
+ * does not matter and the new object can just use the previous one's
+ * parameters. For raw sends, however, the structure of the received
+ * dnode (including indirects and dnode slots) must match that of the
+ * send side. Therefore, instead of using dmu_object_reclaim(), we
+ * must free the object completely and call dmu_object_claim_dnsize()
+ * instead.
+ */
+ if ((rwa->raw && ((doi->doi_indirection > 1 &&
+ indblksz != doi->doi_metadata_block_size) ||
+ drro->drr_nlevels < doi->doi_indirection)) ||
+ dn_slots != doi->doi_dnodesize >> DNODE_SHIFT) {
+ err = dmu_free_long_object(rwa->os, drro->drr_object);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
+
+ txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+ *object_to_hold = DMU_NEW_OBJECT;
+ }
+
+ /*
+ * For raw receives, free everything beyond the new incoming
+ * maxblkid. Normally this would be done with a DRR_FREE
+ * record that would come after this DRR_OBJECT record is
+ * processed. However, for raw receives we manually set the
+ * maxblkid from the drr_maxblkid and so we must first free
+ * everything above that blkid to ensure the DMU is always
+ * consistent with itself. We will never free the first block
+ * of the object here because a maxblkid of 0 could indicate
+ * an object with a single block or one with no blocks. This
+ * free may be skipped when dmu_free_long_range() was called
+ * above since it covers the entire object's contents.
+ */
+ if (rwa->raw && *object_to_hold != DMU_NEW_OBJECT && !do_free_range) {
+ err = dmu_free_long_range(rwa->os, drro->drr_object,
+ (drro->drr_maxblkid + 1) * doi->doi_data_block_size,
+ DMU_OBJECT_END);
+ if (err != 0)
+ return (SET_ERROR(EINVAL));
+ }
+ return (0);
+}
+
noinline static int
receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
void *data)
{
dmu_object_info_t doi;
dmu_tx_t *tx;
- uint64_t object;
int err;
+ uint32_t new_blksz = drro->drr_blksz;
uint8_t dn_slots = drro->drr_dn_slots != 0 ?
drro->drr_dn_slots : DNODE_MIN_SLOTS;
* Raw receives will also check that the indirect structure of the
* dnode hasn't changed.
*/
+ uint64_t object_to_hold;
if (err == 0) {
- uint32_t indblksz = drro->drr_indblkshift ?
- 1ULL << drro->drr_indblkshift : 0;
- int nblkptr = deduce_nblkptr(drro->drr_bonustype,
- drro->drr_bonuslen);
- boolean_t did_free = B_FALSE;
-
- object = drro->drr_object;
-
- /* nblkptr should be bounded by the bonus size and type */
- if (rwa->raw && nblkptr != drro->drr_nblkptr)
- return (SET_ERROR(EINVAL));
-
- /*
- * Check for indicators that the object was freed and
- * reallocated. For all sends, these indicators are:
- * - A changed block size
- * - A smaller nblkptr
- * - A changed dnode size
- * For raw sends we also check a few other fields to
- * ensure we are preserving the objset structure exactly
- * as it was on the receive side:
- * - A changed indirect block size
- * - A smaller nlevels
- */
- if (drro->drr_blksz != doi.doi_data_block_size ||
- nblkptr < doi.doi_nblkptr ||
- dn_slots != doi.doi_dnodesize >> DNODE_SHIFT ||
- (rwa->raw &&
- (indblksz != doi.doi_metadata_block_size ||
- drro->drr_nlevels < doi.doi_indirection))) {
- err = dmu_free_long_range(rwa->os, drro->drr_object,
- 0, DMU_OBJECT_END);
- if (err != 0)
- return (SET_ERROR(EINVAL));
- else
- did_free = B_TRUE;
- }
-
- /*
- * The dmu does not currently support decreasing nlevels
- * or changing the number of dnode slots on an object. For
- * non-raw sends, this does not matter and the new object
- * can just use the previous one's nlevels. For raw sends,
- * however, the structure of the received dnode (including
- * nlevels and dnode slots) must match that of the send
- * side. Therefore, instead of using dmu_object_reclaim(),
- * we must free the object completely and call
- * dmu_object_claim_dnsize() instead.
- */
- if ((rwa->raw && drro->drr_nlevels < doi.doi_indirection) ||
- dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
- err = dmu_free_long_object(rwa->os, drro->drr_object);
- if (err != 0)
- return (SET_ERROR(EINVAL));
-
- txg_wait_synced(dmu_objset_pool(rwa->os), 0);
- object = DMU_NEW_OBJECT;
- }
-
- /*
- * For raw receives, free everything beyond the new incoming
- * maxblkid. Normally this would be done with a DRR_FREE
- * record that would come after this DRR_OBJECT record is
- * processed. However, for raw receives we manually set the
- * maxblkid from the drr_maxblkid and so we must first free
- * everything above that blkid to ensure the DMU is always
- * consistent with itself. We will never free the first block
- * of the object here because a maxblkid of 0 could indicate
- * an object with a single block or one with no blocks. This
- * free may be skipped when dmu_free_long_range() was called
- * above since it covers the entire object's contents.
- */
- if (rwa->raw && object != DMU_NEW_OBJECT && !did_free) {
- err = dmu_free_long_range(rwa->os, drro->drr_object,
- (drro->drr_maxblkid + 1) * doi.doi_data_block_size,
- DMU_OBJECT_END);
- if (err != 0)
- return (SET_ERROR(EINVAL));
- }
+ err = receive_handle_existing_object(rwa, drro, &doi, data,
+ &object_to_hold, &new_blksz);
+ if (err != 0)
+ return (err);
} else if (err == EEXIST) {
/*
* The object requested is currently an interior slot of a
return (SET_ERROR(EINVAL));
/* object was freed and we are about to allocate a new one */
- object = DMU_NEW_OBJECT;
+ object_to_hold = DMU_NEW_OBJECT;
} else {
+ /*
+ * If the only record in this range so far was DRR_FREEOBJECTS
+ * with at least one actually freed object, it's possible that
+ * the block will now be converted to a hole. We need to wait
+ * for the txg to sync to prevent races.
+ */
+ if (rwa->or_need_sync == ORNS_YES)
+ txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+
/* object is free and we are about to allocate a new one */
- object = DMU_NEW_OBJECT;
+ object_to_hold = DMU_NEW_OBJECT;
}
+ /* Only relevant for the first object in the range */
+ rwa->or_need_sync = ORNS_NO;
+
/*
* If this is a multi-slot dnode there is a chance that this
* object will expand into a slot that is already used by
}
tx = dmu_tx_create(rwa->os);
- dmu_tx_hold_bonus(tx, object);
- dmu_tx_hold_write(tx, object, 0, 0);
+ dmu_tx_hold_bonus(tx, object_to_hold);
+ dmu_tx_hold_write(tx, object_to_hold, 0, 0);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
return (err);
}
- if (object == DMU_NEW_OBJECT) {
+ if (object_to_hold == DMU_NEW_OBJECT) {
/* Currently free, wants to be allocated */
err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
+ drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, tx);
} else if (drro->drr_type != doi.doi_type ||
- drro->drr_blksz != doi.doi_data_block_size ||
+ new_blksz != doi.doi_data_block_size ||
drro->drr_bonustype != doi.doi_bonus_type ||
drro->drr_bonuslen != doi.doi_bonus_size) {
/* Currently allocated, but with different properties */
err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
- drro->drr_type, drro->drr_blksz,
+ drro->drr_type, new_blksz,
drro->drr_bonustype, drro->drr_bonuslen,
dn_slots << DNODE_SHIFT, rwa->spill ?
DRR_OBJECT_HAS_SPILL(drro->drr_flags) : B_FALSE, tx);
* For non-new objects block size and indirect block
* shift cannot change and nlevels can only increase.
*/
+ ASSERT3U(new_blksz, ==, drro->drr_blksz);
VERIFY0(dmu_object_set_blocksize(rwa->os, drro->drr_object,
drro->drr_blksz, drro->drr_indblkshift, tx));
VERIFY0(dmu_object_set_nlevels(rwa->os, drro->drr_object,
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
- bcopy(data, db->db_data, DRR_OBJECT_PAYLOAD_SIZE(drro));
+ memcpy(db->db_data, data, DRR_OBJECT_PAYLOAD_SIZE(drro));
/*
* Raw bonus buffers have their byteorder determined by the
dmu_buf_rele(db, FTAG);
dnode_rele(dn, FTAG);
}
+
+ /*
+ * If the receive fails, we want the resume stream to start with the
+ * same record that we last successfully received. There is no way to
+ * request resume from the object record, but we can benefit from the
+	 * fact that the sender always sends the object record before anything else,
+ * after which it will "resend" data at offset 0 and resume normally.
+ */
+ save_resume_state(rwa, drro->drr_object, 0, tx);
+
dmu_tx_commit(tx);
return (0);
}
-/* ARGSUSED */
noinline static int
receive_freeobjects(struct receive_writer_arg *rwa,
struct drr_freeobjects *drrfo)
if (err != 0)
return (err);
+
+ if (rwa->or_need_sync == ORNS_MAYBE)
+ rwa->or_need_sync = ORNS_YES;
}
if (next_err != ESRCH)
return (next_err);
return (0);
}
-noinline static int
-receive_write(struct receive_writer_arg *rwa, struct drr_write *drrw,
- arc_buf_t *abuf)
+/*
+ * Note: if this fails, the caller will clean up any records left on the
+ * rwa->write_batch list.
+ */
+static int
+flush_write_batch_impl(struct receive_writer_arg *rwa)
{
- int err;
- dmu_tx_t *tx;
dnode_t *dn;
+ int err;
- if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
- !DMU_OT_IS_VALID(drrw->drr_type))
+ if (dnode_hold(rwa->os, rwa->last_object, FTAG, &dn) != 0)
return (SET_ERROR(EINVAL));
- /*
- * For resuming to work, records must be in increasing order
- * by (object, offset).
- */
- if (drrw->drr_object < rwa->last_object ||
- (drrw->drr_object == rwa->last_object &&
- drrw->drr_offset < rwa->last_offset)) {
- return (SET_ERROR(EINVAL));
- }
- rwa->last_object = drrw->drr_object;
- rwa->last_offset = drrw->drr_offset;
+ struct receive_record_arg *last_rrd = list_tail(&rwa->write_batch);
+ struct drr_write *last_drrw = &last_rrd->header.drr_u.drr_write;
- if (rwa->last_object > rwa->max_object)
- rwa->max_object = rwa->last_object;
+ struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
+ struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
- if (dmu_object_info(rwa->os, drrw->drr_object, NULL) != 0)
- return (SET_ERROR(EINVAL));
+ ASSERT3U(rwa->last_object, ==, last_drrw->drr_object);
+ ASSERT3U(rwa->last_offset, ==, last_drrw->drr_offset);
- tx = dmu_tx_create(rwa->os);
- dmu_tx_hold_write(tx, drrw->drr_object,
- drrw->drr_offset, drrw->drr_logical_size);
+ dmu_tx_t *tx = dmu_tx_create(rwa->os);
+ dmu_tx_hold_write_by_dnode(tx, dn, first_drrw->drr_offset,
+ last_drrw->drr_offset - first_drrw->drr_offset +
+ last_drrw->drr_logical_size);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
+ dnode_rele(dn, FTAG);
return (err);
}
- if (rwa->byteswap && !arc_is_encrypted(abuf) &&
- arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
- dmu_object_byteswap_t byteswap =
- DMU_OT_BYTESWAP(drrw->drr_type);
- dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
- DRR_WRITE_PAYLOAD_SIZE(drrw));
- }
+ struct receive_record_arg *rrd;
+ while ((rrd = list_head(&rwa->write_batch)) != NULL) {
+ struct drr_write *drrw = &rrd->header.drr_u.drr_write;
+ abd_t *abd = rrd->abd;
- /* use the bonus buf to look up the dnode in dmu_assign_arcbuf */
- VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
- err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
- if (err != 0) {
- dnode_rele(dn, FTAG);
- dmu_tx_commit(tx);
- return (err);
+ ASSERT3U(drrw->drr_object, ==, rwa->last_object);
+
+ if (drrw->drr_logical_size != dn->dn_datablksz) {
+ /*
+ * The WRITE record is larger than the object's block
+ * size. We must be receiving an incremental
+ * large-block stream into a dataset that previously did
+ * a non-large-block receive. Lightweight writes must
+ * be exactly one block, so we need to decompress the
+ * data (if compressed) and do a normal dmu_write().
+ */
+ ASSERT3U(drrw->drr_logical_size, >, dn->dn_datablksz);
+ if (DRR_WRITE_COMPRESSED(drrw)) {
+ abd_t *decomp_abd =
+ abd_alloc_linear(drrw->drr_logical_size,
+ B_FALSE);
+
+ err = zio_decompress_data(
+ drrw->drr_compressiontype,
+ abd, abd_to_buf(decomp_abd),
+ abd_get_size(abd),
+ abd_get_size(decomp_abd), NULL);
+
+ if (err == 0) {
+ dmu_write_by_dnode(dn,
+ drrw->drr_offset,
+ drrw->drr_logical_size,
+ abd_to_buf(decomp_abd), tx);
+ }
+ abd_free(decomp_abd);
+ } else {
+ dmu_write_by_dnode(dn,
+ drrw->drr_offset,
+ drrw->drr_logical_size,
+ abd_to_buf(abd), tx);
+ }
+ if (err == 0)
+ abd_free(abd);
+ } else {
+ zio_prop_t zp = {0};
+ dmu_write_policy(rwa->os, dn, 0, 0, &zp);
+
+ zio_flag_t zio_flags = 0;
+
+ if (rwa->raw) {
+ zp.zp_encrypt = B_TRUE;
+ zp.zp_compress = drrw->drr_compressiontype;
+ zp.zp_byteorder = ZFS_HOST_BYTEORDER ^
+ !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
+ rwa->byteswap;
+ memcpy(zp.zp_salt, drrw->drr_salt,
+ ZIO_DATA_SALT_LEN);
+ memcpy(zp.zp_iv, drrw->drr_iv,
+ ZIO_DATA_IV_LEN);
+ memcpy(zp.zp_mac, drrw->drr_mac,
+ ZIO_DATA_MAC_LEN);
+ if (DMU_OT_IS_ENCRYPTED(zp.zp_type)) {
+ zp.zp_nopwrite = B_FALSE;
+ zp.zp_copies = MIN(zp.zp_copies,
+ SPA_DVAS_PER_BP - 1);
+ }
+ zio_flags |= ZIO_FLAG_RAW;
+ } else if (DRR_WRITE_COMPRESSED(drrw)) {
+ ASSERT3U(drrw->drr_compressed_size, >, 0);
+ ASSERT3U(drrw->drr_logical_size, >=,
+ drrw->drr_compressed_size);
+ zp.zp_compress = drrw->drr_compressiontype;
+ zio_flags |= ZIO_FLAG_RAW_COMPRESS;
+ } else if (rwa->byteswap) {
+ /*
+ * Note: compressed blocks never need to be
+ * byteswapped, because WRITE records for
+ * metadata blocks are never compressed. The
+ * exception is raw streams, which are written
+ * in the original byteorder, and the byteorder
+ * bit is preserved in the BP by setting
+ * zp_byteorder above.
+ */
+ dmu_object_byteswap_t byteswap =
+ DMU_OT_BYTESWAP(drrw->drr_type);
+ dmu_ot_byteswap[byteswap].ob_func(
+ abd_to_buf(abd),
+ DRR_WRITE_PAYLOAD_SIZE(drrw));
+ }
+
+ /*
+ * Since this data can't be read until the receive
+ * completes, we can do a "lightweight" write for
+ * improved performance.
+ */
+ err = dmu_lightweight_write_by_dnode(dn,
+ drrw->drr_offset, abd, &zp, zio_flags, tx);
+ }
+
+ if (err != 0) {
+ /*
+ * This rrd is left on the list, so the caller will
+ * free it (and the abd).
+ */
+ break;
+ }
+
+ /*
+ * Note: If the receive fails, we want the resume stream to
+ * start with the same record that we last successfully
+ * received (as opposed to the next record), so that we can
+ * verify that we are resuming from the correct location.
+ */
+ save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
+
+ list_remove(&rwa->write_batch, rrd);
+ kmem_free(rrd, sizeof (*rrd));
}
- dnode_rele(dn, FTAG);
- /*
- * Note: If the receive fails, we want the resume stream to start
- * with the same record that we last successfully received (as opposed
- * to the next record), so that we can verify that we are
- * resuming from the correct location.
- */
- save_resume_state(rwa, drrw->drr_object, drrw->drr_offset, tx);
dmu_tx_commit(tx);
-
- return (0);
+ dnode_rele(dn, FTAG);
+ return (err);
}
-/*
- * Handle a DRR_WRITE_BYREF record. This record is used in dedup'ed
- * streams to refer to a copy of the data that is already on the
- * system because it came in earlier in the stream. This function
- * finds the earlier copy of the data, and uses that copy instead of
- * data from the stream to fulfill this write.
- */
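+/*
+ * Flush any WRITE records queued on rwa->write_batch. On failure the queued
+ * records (and their abds) are freed here and the error is returned to the
+ * caller.
+ */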
noinline static int
-receive_write_byref(struct receive_writer_arg *rwa,
- struct drr_write_byref *drrwbr)
+flush_write_batch(struct receive_writer_arg *rwa)
{
- dmu_tx_t *tx;
- int err;
- guid_map_entry_t gmesrch;
- guid_map_entry_t *gmep;
- avl_index_t where;
- objset_t *ref_os = NULL;
- int flags = DMU_READ_PREFETCH;
- dmu_buf_t *dbp;
-
- if (drrwbr->drr_offset + drrwbr->drr_length < drrwbr->drr_offset)
- return (SET_ERROR(EINVAL));
-
- /*
- * If the GUID of the referenced dataset is different from the
- * GUID of the target dataset, find the referenced dataset.
- */
- if (drrwbr->drr_toguid != drrwbr->drr_refguid) {
- gmesrch.guid = drrwbr->drr_refguid;
- if ((gmep = avl_find(rwa->guid_to_ds_map, &gmesrch,
- &where)) == NULL) {
- return (SET_ERROR(EINVAL));
+ if (list_is_empty(&rwa->write_batch))
+ return (0);
+ int err = rwa->err;
+ if (err == 0)
+ err = flush_write_batch_impl(rwa);
+ if (err != 0) {
+ struct receive_record_arg *rrd;
+ while ((rrd = list_remove_head(&rwa->write_batch)) != NULL) {
+ abd_free(rrd->abd);
+ kmem_free(rrd, sizeof (*rrd));
}
- if (dmu_objset_from_ds(gmep->gme_ds, &ref_os))
- return (SET_ERROR(EINVAL));
- } else {
- ref_os = rwa->os;
}
+ ASSERT(list_is_empty(&rwa->write_batch));
+ return (err);
+}
- if (drrwbr->drr_object > rwa->max_object)
- rwa->max_object = drrwbr->drr_object;
+noinline static int
+receive_process_write_record(struct receive_writer_arg *rwa,
+ struct receive_record_arg *rrd)
+{
+ int err = 0;
- if (rwa->raw)
- flags |= DMU_READ_NO_DECRYPT;
+ ASSERT3U(rrd->header.drr_type, ==, DRR_WRITE);
+ struct drr_write *drrw = &rrd->header.drr_u.drr_write;
- /* may return either a regular db or an encrypted one */
- err = dmu_buf_hold(ref_os, drrwbr->drr_refobject,
- drrwbr->drr_refoffset, FTAG, &dbp, flags);
- if (err != 0)
- return (err);
+ if (drrw->drr_offset + drrw->drr_logical_size < drrw->drr_offset ||
+ !DMU_OT_IS_VALID(drrw->drr_type))
+ return (SET_ERROR(EINVAL));
- tx = dmu_tx_create(rwa->os);
+ if (rwa->heal) {
+ blkptr_t *bp;
+ dmu_buf_t *dbp;
+ int flags = DB_RF_CANFAIL;
- dmu_tx_hold_write(tx, drrwbr->drr_object,
- drrwbr->drr_offset, drrwbr->drr_length);
- err = dmu_tx_assign(tx, TXG_WAIT);
- if (err != 0) {
- dmu_tx_abort(tx);
+ if (rwa->raw)
+ flags |= DB_RF_NO_DECRYPT;
+
+ if (rwa->byteswap) {
+ dmu_object_byteswap_t byteswap =
+ DMU_OT_BYTESWAP(drrw->drr_type);
+ dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(rrd->abd),
+ DRR_WRITE_PAYLOAD_SIZE(drrw));
+ }
+
+ err = dmu_buf_hold_noread(rwa->os, drrw->drr_object,
+ drrw->drr_offset, FTAG, &dbp);
+ if (err != 0)
+ return (err);
+
+ /* Try to read the object to see if it needs healing */
+ err = dbuf_read((dmu_buf_impl_t *)dbp, NULL, flags);
+ /*
+		 * We only try to heal when dbuf_read() returns ECKSUM.
+		 * Other errors (even EIO) get returned to the caller.
+ * EIO indicates that the device is not present/accessible,
+ * so writing to it will likely fail.
+ * If the block is healthy, we don't want to overwrite it
+ * unnecessarily.
+ */
+ if (err != ECKSUM) {
+ dmu_buf_rele(dbp, FTAG);
+ return (err);
+ }
+ /* Make sure the on-disk block and recv record sizes match */
+ if (drrw->drr_logical_size != dbp->db_size) {
+ err = ENOTSUP;
+ dmu_buf_rele(dbp, FTAG);
+ return (err);
+ }
+ /* Get the block pointer for the corrupted block */
+ bp = dmu_buf_get_blkptr(dbp);
+ err = do_corrective_recv(rwa, drrw, rrd, bp);
+ dmu_buf_rele(dbp, FTAG);
return (err);
}
- if (rwa->raw) {
- dmu_copy_from_buf(rwa->os, drrwbr->drr_object,
- drrwbr->drr_offset, dbp, tx);
- } else {
- dmu_write(rwa->os, drrwbr->drr_object,
- drrwbr->drr_offset, drrwbr->drr_length, dbp->db_data, tx);
+ /*
+ * For resuming to work, records must be in increasing order
+ * by (object, offset).
+ */
+ if (drrw->drr_object < rwa->last_object ||
+ (drrw->drr_object == rwa->last_object &&
+ drrw->drr_offset < rwa->last_offset)) {
+ return (SET_ERROR(EINVAL));
}
- dmu_buf_rele(dbp, FTAG);
- /* See comment in restore_write. */
- save_resume_state(rwa, drrwbr->drr_object, drrwbr->drr_offset, tx);
- dmu_tx_commit(tx);
- return (0);
+ struct receive_record_arg *first_rrd = list_head(&rwa->write_batch);
+ struct drr_write *first_drrw = &first_rrd->header.drr_u.drr_write;
+ uint64_t batch_size =
+ MIN(zfs_recv_write_batch_size, DMU_MAX_ACCESS / 2);
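+	/*
+	 * Flush the pending batch before queueing this record if it is for a
+	 * different object or lies batch_size or more bytes beyond the
+	 * batch's first offset.
+	 */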
+ if (first_rrd != NULL &&
+ (drrw->drr_object != first_drrw->drr_object ||
+ drrw->drr_offset >= first_drrw->drr_offset + batch_size)) {
+ err = flush_write_batch(rwa);
+ if (err != 0)
+ return (err);
+ }
+
+ rwa->last_object = drrw->drr_object;
+ rwa->last_offset = drrw->drr_offset;
+
+ if (rwa->last_object > rwa->max_object)
+ rwa->max_object = rwa->last_object;
+
+ list_insert_tail(&rwa->write_batch, rrd);
+ /*
+ * Return EAGAIN to indicate that we will use this rrd again,
+	 * so the caller should not free it.
+ */
+ return (EAGAIN);
}
static int
static int
receive_spill(struct receive_writer_arg *rwa, struct drr_spill *drrs,
- arc_buf_t *abuf)
+ abd_t *abd)
{
- dmu_tx_t *tx;
dmu_buf_t *db, *db_spill;
int err;
* the DRR_FLAG_SPILL_BLOCK flag.
*/
if (rwa->spill && DRR_SPILL_IS_UNMODIFIED(drrs->drr_flags)) {
- dmu_return_arcbuf(abuf);
+ abd_free(abd);
return (0);
}
return (err);
}
- tx = dmu_tx_create(rwa->os);
+ dmu_tx_t *tx = dmu_tx_create(rwa->os);
dmu_tx_hold_spill(tx, db->db_object);
* size of the provided arc_buf_t.
*/
if (db_spill->db_size != drrs->drr_length) {
- dmu_buf_will_fill(db_spill, tx);
- VERIFY(0 == dbuf_spill_set_blksz(db_spill,
+ dmu_buf_will_fill(db_spill, tx, B_FALSE);
+ VERIFY0(dbuf_spill_set_blksz(db_spill,
drrs->drr_length, tx));
}
- if (rwa->byteswap && !arc_is_encrypted(abuf) &&
- arc_get_compression(abuf) == ZIO_COMPRESS_OFF) {
- dmu_object_byteswap_t byteswap =
- DMU_OT_BYTESWAP(drrs->drr_type);
- dmu_ot_byteswap[byteswap].ob_func(abuf->b_data,
- DRR_SPILL_PAYLOAD_SIZE(drrs));
+ arc_buf_t *abuf;
+ if (rwa->raw) {
+ boolean_t byteorder = ZFS_HOST_BYTEORDER ^
+ !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
+ rwa->byteswap;
+
+ abuf = arc_loan_raw_buf(dmu_objset_spa(rwa->os),
+ drrs->drr_object, byteorder, drrs->drr_salt,
+ drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
+ drrs->drr_compressed_size, drrs->drr_length,
+ drrs->drr_compressiontype, 0);
+ } else {
+ abuf = arc_loan_buf(dmu_objset_spa(rwa->os),
+ DMU_OT_IS_METADATA(drrs->drr_type),
+ drrs->drr_length);
+ if (rwa->byteswap) {
+ dmu_object_byteswap_t byteswap =
+ DMU_OT_BYTESWAP(drrs->drr_type);
+ dmu_ot_byteswap[byteswap].ob_func(abd_to_buf(abd),
+ DRR_SPILL_PAYLOAD_SIZE(drrs));
+ }
}
+ memcpy(abuf->b_data, abd_to_buf(abd), DRR_SPILL_PAYLOAD_SIZE(drrs));
+ abd_free(abd);
dbuf_assign_arcbuf((dmu_buf_impl_t *)db_spill, abuf, tx);
dmu_buf_rele(db, FTAG);
return (0);
}
-/* ARGSUSED */
noinline static int
receive_free(struct receive_writer_arg *rwa, struct drr_free *drrf)
{
rwa->or_crypt_params_present = B_TRUE;
rwa->or_firstobj = drror->drr_firstobj;
rwa->or_numslots = drror->drr_numslots;
- bcopy(drror->drr_salt, rwa->or_salt, ZIO_DATA_SALT_LEN);
- bcopy(drror->drr_iv, rwa->or_iv, ZIO_DATA_IV_LEN);
- bcopy(drror->drr_mac, rwa->or_mac, ZIO_DATA_MAC_LEN);
+ memcpy(rwa->or_salt, drror->drr_salt, ZIO_DATA_SALT_LEN);
+ memcpy(rwa->or_iv, drror->drr_iv, ZIO_DATA_IV_LEN);
+ memcpy(rwa->or_mac, drror->drr_mac, ZIO_DATA_MAC_LEN);
rwa->or_byteorder = byteorder;
+ rwa->or_need_sync = ORNS_MAYBE;
+
return (0);
}
* Until we have the ability to redact large ranges of data efficiently, we
* process these records as frees.
*/
-/* ARGSUSED */
noinline static int
receive_redact(struct receive_writer_arg *rwa, struct drr_redact *drrr)
{
dmu_recv_cleanup_ds(dmu_recv_cookie_t *drc)
{
dsl_dataset_t *ds = drc->drc_ds;
- ds_hold_flags_t dsflags = (drc->drc_raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
+ ds_hold_flags_t dsflags;
+ dsflags = (drc->drc_raw) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
/*
* Wait for the txg sync before cleaning up the receive. For
* resumable receives, this ensures that our resume state has
ds->ds_objset->os_raw_receive = B_FALSE;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
- if (drc->drc_resumable && !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
+ if (drc->drc_resumable && drc->drc_should_save &&
+ !BP_IS_HOLE(dsl_dataset_get_blkptr(ds))) {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
} else {
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dataset_name(ds, name);
dsl_dataset_disown(ds, dsflags, dmu_recv_tag);
- (void) dsl_destroy_head(name);
+ if (!drc->drc_heal)
+ (void) dsl_destroy_head(name);
}
}
* numbers in the ignore list. In practice, we receive up to 32 object records
* before receiving write records, so the list can have up to 32 nodes in it.
*/
-/* ARGSUSED */
static void
receive_read_prefetch(dmu_recv_cookie_t *drc, uint64_t object, uint64_t offset,
uint64_t length)
case DRR_WRITE:
{
struct drr_write *drrw = &drc->drc_rrd->header.drr_u.drr_write;
- arc_buf_t *abuf;
- boolean_t is_meta = DMU_OT_IS_METADATA(drrw->drr_type);
-
- if (drc->drc_raw) {
- boolean_t byteorder = ZFS_HOST_BYTEORDER ^
- !!DRR_IS_RAW_BYTESWAPPED(drrw->drr_flags) ^
- drc->drc_byteswap;
-
- abuf = arc_loan_raw_buf(dmu_objset_spa(drc->drc_os),
- drrw->drr_object, byteorder, drrw->drr_salt,
- drrw->drr_iv, drrw->drr_mac, drrw->drr_type,
- drrw->drr_compressed_size, drrw->drr_logical_size,
- drrw->drr_compressiontype);
- } else if (DRR_WRITE_COMPRESSED(drrw)) {
- ASSERT3U(drrw->drr_compressed_size, >, 0);
- ASSERT3U(drrw->drr_logical_size, >=,
- drrw->drr_compressed_size);
- ASSERT(!is_meta);
- abuf = arc_loan_compressed_buf(
- dmu_objset_spa(drc->drc_os),
- drrw->drr_compressed_size, drrw->drr_logical_size,
- drrw->drr_compressiontype);
- } else {
- abuf = arc_loan_buf(dmu_objset_spa(drc->drc_os),
- is_meta, drrw->drr_logical_size);
- }
-
- err = receive_read_payload_and_next_header(drc,
- DRR_WRITE_PAYLOAD_SIZE(drrw), abuf->b_data);
+ int size = DRR_WRITE_PAYLOAD_SIZE(drrw);
+ abd_t *abd = abd_alloc_linear(size, B_FALSE);
+ err = receive_read_payload_and_next_header(drc, size,
+ abd_to_buf(abd));
if (err != 0) {
- dmu_return_arcbuf(abuf);
+ abd_free(abd);
return (err);
}
- drc->drc_rrd->arc_buf = abuf;
+ drc->drc_rrd->abd = abd;
receive_read_prefetch(drc, drrw->drr_object, drrw->drr_offset,
drrw->drr_logical_size);
return (err);
}
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref *drrwb =
- &drc->drc_rrd->header.drr_u.drr_write_byref;
- err = receive_read_payload_and_next_header(drc, 0, NULL);
- receive_read_prefetch(drc, drrwb->drr_object, drrwb->drr_offset,
- drrwb->drr_length);
- return (err);
- }
case DRR_WRITE_EMBEDDED:
{
struct drr_write_embedded *drrwe =
case DRR_SPILL:
{
struct drr_spill *drrs = &drc->drc_rrd->header.drr_u.drr_spill;
- arc_buf_t *abuf;
- /* DRR_SPILL records are either raw or uncompressed */
- if (drc->drc_raw) {
- boolean_t byteorder = ZFS_HOST_BYTEORDER ^
- !!DRR_IS_RAW_BYTESWAPPED(drrs->drr_flags) ^
- drc->drc_byteswap;
-
- abuf = arc_loan_raw_buf(dmu_objset_spa(drc->drc_os),
- drrs->drr_object, byteorder, drrs->drr_salt,
- drrs->drr_iv, drrs->drr_mac, drrs->drr_type,
- drrs->drr_compressed_size, drrs->drr_length,
- drrs->drr_compressiontype);
- } else {
- abuf = arc_loan_buf(dmu_objset_spa(drc->drc_os),
- DMU_OT_IS_METADATA(drrs->drr_type),
- drrs->drr_length);
- }
- err = receive_read_payload_and_next_header(drc,
- DRR_SPILL_PAYLOAD_SIZE(drrs), abuf->b_data);
+ int size = DRR_SPILL_PAYLOAD_SIZE(drrs);
+ abd_t *abd = abd_alloc_linear(size, B_FALSE);
+ err = receive_read_payload_and_next_header(drc, size,
+ abd_to_buf(abd));
if (err != 0)
- dmu_return_arcbuf(abuf);
+ abd_free(abd);
else
- drc->drc_rrd->arc_buf = abuf;
+ drc->drc_rrd->abd = abd;
return (err);
}
case DRR_OBJECT_RANGE:
dprintf("drr_type = OBJECT obj = %llu type = %u "
"bonustype = %u blksz = %u bonuslen = %u cksumtype = %u "
"compress = %u dn_slots = %u err = %d\n",
- drro->drr_object, drro->drr_type, drro->drr_bonustype,
- drro->drr_blksz, drro->drr_bonuslen,
+ (u_longlong_t)drro->drr_object, drro->drr_type,
+ drro->drr_bonustype, drro->drr_blksz, drro->drr_bonuslen,
drro->drr_checksumtype, drro->drr_compress,
drro->drr_dn_slots, err);
break;
&rrd->header.drr_u.drr_freeobjects;
dprintf("drr_type = FREEOBJECTS firstobj = %llu "
"numobjs = %llu err = %d\n",
- drrfo->drr_firstobj, drrfo->drr_numobjs, err);
+ (u_longlong_t)drrfo->drr_firstobj,
+ (u_longlong_t)drrfo->drr_numobjs, err);
break;
}
case DRR_WRITE:
dprintf("drr_type = WRITE obj = %llu type = %u offset = %llu "
"lsize = %llu cksumtype = %u flags = %u "
"compress = %u psize = %llu err = %d\n",
- drrw->drr_object, drrw->drr_type, drrw->drr_offset,
- drrw->drr_logical_size, drrw->drr_checksumtype,
- drrw->drr_flags, drrw->drr_compressiontype,
- drrw->drr_compressed_size, err);
+ (u_longlong_t)drrw->drr_object, drrw->drr_type,
+ (u_longlong_t)drrw->drr_offset,
+ (u_longlong_t)drrw->drr_logical_size,
+ drrw->drr_checksumtype, drrw->drr_flags,
+ drrw->drr_compressiontype,
+ (u_longlong_t)drrw->drr_compressed_size, err);
break;
}
case DRR_WRITE_BYREF:
"length = %llu toguid = %llx refguid = %llx "
"refobject = %llu refoffset = %llu cksumtype = %u "
"flags = %u err = %d\n",
- drrwbr->drr_object, drrwbr->drr_offset,
- drrwbr->drr_length, drrwbr->drr_toguid,
- drrwbr->drr_refguid, drrwbr->drr_refobject,
- drrwbr->drr_refoffset, drrwbr->drr_checksumtype,
- drrwbr->drr_flags, err);
+ (u_longlong_t)drrwbr->drr_object,
+ (u_longlong_t)drrwbr->drr_offset,
+ (u_longlong_t)drrwbr->drr_length,
+ (u_longlong_t)drrwbr->drr_toguid,
+ (u_longlong_t)drrwbr->drr_refguid,
+ (u_longlong_t)drrwbr->drr_refobject,
+ (u_longlong_t)drrwbr->drr_refoffset,
+ drrwbr->drr_checksumtype, drrwbr->drr_flags, err);
break;
}
case DRR_WRITE_EMBEDDED:
dprintf("drr_type = WRITE_EMBEDDED obj = %llu offset = %llu "
"length = %llu compress = %u etype = %u lsize = %u "
"psize = %u err = %d\n",
- drrwe->drr_object, drrwe->drr_offset, drrwe->drr_length,
+ (u_longlong_t)drrwe->drr_object,
+ (u_longlong_t)drrwe->drr_offset,
+ (u_longlong_t)drrwe->drr_length,
drrwe->drr_compression, drrwe->drr_etype,
drrwe->drr_lsize, drrwe->drr_psize, err);
break;
struct drr_free *drrf = &rrd->header.drr_u.drr_free;
dprintf("drr_type = FREE obj = %llu offset = %llu "
"length = %lld err = %d\n",
- drrf->drr_object, drrf->drr_offset, drrf->drr_length,
+ (u_longlong_t)drrf->drr_object,
+ (u_longlong_t)drrf->drr_offset,
+ (longlong_t)drrf->drr_length,
err);
break;
}
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
dprintf("drr_type = SPILL obj = %llu length = %llu "
- "err = %d\n", drrs->drr_object, drrs->drr_length, err);
+ "err = %d\n", (u_longlong_t)drrs->drr_object,
+ (u_longlong_t)drrs->drr_length, err);
break;
}
case DRR_OBJECT_RANGE:
&rrd->header.drr_u.drr_object_range;
dprintf("drr_type = OBJECT_RANGE firstobj = %llu "
"numslots = %llu flags = %u err = %d\n",
- drror->drr_firstobj, drror->drr_numslots,
+ (u_longlong_t)drror->drr_firstobj,
+ (u_longlong_t)drror->drr_numslots,
drror->drr_flags, err);
break;
}
ASSERT3U(rrd->bytes_read, >=, rwa->bytes_read);
rwa->bytes_read = rrd->bytes_read;
+ /* We can only heal write records; other ones get ignored */
+ if (rwa->heal && rrd->header.drr_type != DRR_WRITE) {
+ if (rrd->abd != NULL) {
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
+ } else if (rrd->payload != NULL) {
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ }
+ return (0);
+ }
+
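+ /*
+ * Any record other than a DRR_WRITE ends the current batch, so
+ * flush the pending writes before processing it.
+ */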
+ if (!rwa->heal && rrd->header.drr_type != DRR_WRITE) {
+ err = flush_write_batch(rwa);
+ if (err != 0) {
+ if (rrd->abd != NULL) {
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
+ rrd->payload = NULL;
+ } else if (rrd->payload != NULL) {
+ kmem_free(rrd->payload, rrd->payload_size);
+ rrd->payload = NULL;
+ }
+
+ return (err);
+ }
+ }
+
switch (rrd->header.drr_type) {
case DRR_OBJECT:
{
}
case DRR_WRITE:
{
- struct drr_write *drrw = &rrd->header.drr_u.drr_write;
- err = receive_write(rwa, drrw, rrd->arc_buf);
- /* if receive_write() is successful, it consumes the arc_buf */
- if (err != 0)
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
- rrd->payload = NULL;
- break;
- }
- case DRR_WRITE_BYREF:
- {
- struct drr_write_byref *drrwbr =
- &rrd->header.drr_u.drr_write_byref;
- err = receive_write_byref(rwa, drrwbr);
+ err = receive_process_write_record(rwa, rrd);
+ if (rwa->heal) {
+ /*
+ * If healing, always free the abd after processing.
+ */
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
+ } else if (err != EAGAIN) {
+ /*
+ * On success, a non-healing
+ * receive_process_write_record() returns
+ * EAGAIN to indicate that we do not want to free
+ * the rrd or abd.
+ */
+ ASSERT(err != 0);
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
+ }
break;
}
case DRR_WRITE_EMBEDDED:
case DRR_SPILL:
{
struct drr_spill *drrs = &rrd->header.drr_u.drr_spill;
- err = receive_spill(rwa, drrs, rrd->arc_buf);
+ err = receive_spill(rwa, drrs, rrd->abd);
if (err != 0)
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
rrd->payload = NULL;
break;
}
* dmu_recv_stream's worker thread; pull records off the queue, and then call
* receive_process_record. When we're done, signal the main thread and exit.
*/
-static void
+static __attribute__((noreturn)) void
receive_writer_thread(void *arg)
{
struct receive_writer_arg *rwa = arg;
* on the queue, but we need to clear everything in it before we
* can exit.
*/
+ int err = 0;
if (rwa->err == 0) {
- rwa->err = receive_process_record(rwa, rrd);
- } else if (rrd->arc_buf != NULL) {
- dmu_return_arcbuf(rrd->arc_buf);
- rrd->arc_buf = NULL;
+ err = receive_process_record(rwa, rrd);
+ } else if (rrd->abd != NULL) {
+ abd_free(rrd->abd);
+ rrd->abd = NULL;
rrd->payload = NULL;
} else if (rrd->payload != NULL) {
kmem_free(rrd->payload, rrd->payload_size);
rrd->payload = NULL;
}
- kmem_free(rrd, sizeof (*rrd));
+ /*
+ * EAGAIN indicates that this record has been saved (on
+ * rwa->write_batch), and will be used again, so we don't
+ * free it.
+ * When healing data, we always need to free the record.
+ */
+ if (err != EAGAIN || rwa->heal) {
+ if (rwa->err == 0)
+ rwa->err = err;
+ kmem_free(rrd, sizeof (*rrd));
+ }
}
kmem_free(rrd, sizeof (*rrd));
+
+ if (rwa->heal) {
+ zio_wait(rwa->heal_pio);
+ } else {
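+ /* Push out any writes that are still batched. */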
+ int err = flush_write_batch(rwa);
+ if (rwa->err == 0)
+ rwa->err = err;
+ }
mutex_enter(&rwa->mutex);
rwa->done = B_TRUE;
cv_signal(&rwa->cv);
* NB: callers *must* call dmu_recv_end() if this succeeds.
*/
int
-dmu_recv_stream(dmu_recv_cookie_t *drc, int cleanup_fd,
- uint64_t *action_handlep, offset_t *voffp)
+dmu_recv_stream(dmu_recv_cookie_t *drc, offset_t *voffp)
{
int err = 0;
struct receive_writer_arg *rwa = kmem_zalloc(sizeof (*rwa), KM_SLEEP);
- if (dsl_dataset_is_zapified(drc->drc_ds)) {
- uint64_t bytes;
+ if (dsl_dataset_has_resume_receive_state(drc->drc_ds)) {
+ uint64_t bytes = 0;
(void) zap_lookup(drc->drc_ds->ds_dir->dd_pool->dp_meta_objset,
drc->drc_ds->ds_object, DS_FIELD_RESUME_BYTES,
sizeof (bytes), 1, &bytes);
DMU_SUBSTREAM);
ASSERT3U(drc->drc_drrb->drr_type, <, DMU_OST_NUMTYPES);
- /*
- * Open the objset we are modifying.
- */
- VERIFY0(dmu_objset_from_ds(drc->drc_ds, &drc->drc_os));
ASSERT(dsl_dataset_phys(drc->drc_ds)->ds_flags & DS_FLAG_INCONSISTENT);
ASSERT0(drc->drc_os->os_encrypted &&
(drc->drc_featureflags & DMU_BACKUP_FEATURE_EMBED_DATA));
- /* if this stream is dedup'ed, set up the avl tree for guid mapping */
- if (drc->drc_featureflags & DMU_BACKUP_FEATURE_DEDUP) {
- minor_t minor;
-
- if (cleanup_fd == -1) {
- err = SET_ERROR(EBADF);
- goto out;
- }
- err = zfs_onexit_fd_hold(cleanup_fd, &minor);
- if (err != 0) {
- cleanup_fd = -1;
- goto out;
- }
-
- if (*action_handlep == 0) {
- rwa->guid_to_ds_map =
- kmem_alloc(sizeof (avl_tree_t), KM_SLEEP);
- avl_create(rwa->guid_to_ds_map, guid_compare,
- sizeof (guid_map_entry_t),
- offsetof(guid_map_entry_t, avlnode));
- err = zfs_onexit_add_cb(minor,
- free_guid_map_onexit, rwa->guid_to_ds_map,
- action_handlep);
- if (err != 0)
- goto out;
- } else {
- err = zfs_onexit_cb_data(minor, *action_handlep,
- (void **)&rwa->guid_to_ds_map);
- if (err != 0)
- goto out;
- }
-
- drc->drc_guid_to_ds_map = rwa->guid_to_ds_map;
- }
-
/* handle DSL encryption key payload */
if (drc->drc_featureflags & DMU_BACKUP_FEATURE_RAW) {
nvlist_t *keynvl = NULL;
if (err != 0)
goto out;
- /*
- * If this is a new dataset we set the key immediately.
- * Otherwise we don't want to change the key until we
- * are sure the rest of the receive succeeded so we stash
- * the keynvl away until then.
- */
- err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
- drc->drc_ds->ds_object, drc->drc_fromsnapobj,
- drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
- if (err != 0)
- goto out;
+ if (!drc->drc_heal) {
+ /*
+ * If this is a new dataset we set the key immediately.
+ * Otherwise we don't want to change the key until we
+ * are sure the rest of the receive succeeded so we
+ * stash the keynvl away until then.
+ */
+ err = dsl_crypto_recv_raw(spa_name(drc->drc_os->os_spa),
+ drc->drc_ds->ds_object, drc->drc_fromsnapobj,
+ drc->drc_drrb->drr_type, keynvl, drc->drc_newfs);
+ if (err != 0)
+ goto out;
+ }
/* see comment in dmu_recv_end_sync() */
drc->drc_ivset_guid = 0;
goto out;
}
+ /*
+ * For compatibility with recursive send streams, we do this here,
+ * rather than in dmu_recv_begin. If we pull the next header too
+ * early, and it's the END record, we break the `recv_skip` logic.
+ */
+ if (drc->drc_drr_begin->drr_payloadlen == 0) {
+ err = receive_read_payload_and_next_header(drc, 0, NULL);
+ if (err != 0)
+ goto out;
+ }
+
+ /*
+ * If we failed before this point we will clean up any new resume
+ * state that was created. Now that we've gotten past the initial
+ * checks we are ok to retain that resume state.
+ */
+ drc->drc_should_save = B_TRUE;
+
(void) bqueue_init(&rwa->q, zfs_recv_queue_ff,
MAX(zfs_recv_queue_length, 2 * zfs_max_recordsize),
offsetof(struct receive_record_arg, node));
mutex_init(&rwa->mutex, NULL, MUTEX_DEFAULT, NULL);
rwa->os = drc->drc_os;
rwa->byteswap = drc->drc_byteswap;
+ rwa->heal = drc->drc_heal;
+ rwa->tofs = drc->drc_tofs;
rwa->resumable = drc->drc_resumable;
rwa->raw = drc->drc_raw;
rwa->spill = drc->drc_spill;
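+ /* A fromguid of zero means there is no origin snapshot, i.e. a full send. */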
+ rwa->full = (drc->drc_drr_begin->drr_u.drr_begin.drr_fromguid == 0);
rwa->os->os_raw_receive = drc->drc_raw;
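+ /*
+ * Healing receives collect their repair I/O under this root zio,
+ * which the writer thread waits on before signaling completion.
+ */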
+ if (drc->drc_heal) {
+ rwa->heal_pio = zio_root(drc->drc_os->os_spa, NULL, NULL,
+ ZIO_FLAG_GODFATHER);
+ }
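+ /*
+ * DRR_WRITE records accumulate on this list and are applied in
+ * batches (bounded by zfs_recv_write_batch_size) rather than one
+ * transaction per record.
+ */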
+ list_create(&rwa->write_batch, sizeof (struct receive_record_arg),
+ offsetof(struct receive_record_arg, node.bqn_node));
(void) thread_create(NULL, 0, receive_writer_thread, rwa, 0, curproc,
TS_RUN, minclsyspri);
cv_destroy(&rwa->cv);
mutex_destroy(&rwa->mutex);
bqueue_destroy(&rwa->q);
+ list_destroy(&rwa->write_batch);
if (err == 0)
err = rwa->err;
if (drc->drc_next_rrd != NULL)
kmem_free(drc->drc_next_rrd, sizeof (*drc->drc_next_rrd));
+ /*
+ * The objset will be invalidated by dmu_recv_end() when we do
+ * dsl_dataset_clone_swap_sync_impl().
+ */
+ drc->drc_os = NULL;
+
kmem_free(rwa, sizeof (*rwa));
nvlist_free(drc->drc_begin_nvl);
- if ((drc->drc_featureflags & DMU_BACKUP_FEATURE_DEDUP) &&
- (cleanup_fd != -1))
- zfs_onexit_fd_rele(cleanup_fd);
if (err != 0) {
/*
ASSERT3P(drc->drc_ds->ds_owner, ==, dmu_recv_tag);
- if (!drc->drc_newfs) {
+ if (drc->drc_heal) {
+ error = 0;
+ } else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
error = dsl_dataset_hold(dp, drc->drc_tofs, FTAG, &origin_head);
return (error);
}
error = dsl_dataset_snapshot_check_impl(origin_head,
- drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
+ drc->drc_tosnap, tx, B_TRUE, 1,
+ drc->drc_cred, drc->drc_proc);
dsl_dataset_rele(origin_head, FTAG);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(drc->drc_ds, 1);
} else {
error = dsl_dataset_snapshot_check_impl(drc->drc_ds,
- drc->drc_tosnap, tx, B_TRUE, 1, drc->drc_cred);
+ drc->drc_tosnap, tx, B_TRUE, 1,
+ drc->drc_cred, drc->drc_proc);
}
return (error);
}
dmu_recv_cookie_t *drc = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
boolean_t encrypted = drc->drc_ds->ds_dir->dd_crypto_obj != 0;
+ uint64_t newsnapobj = 0;
spa_history_log_internal_ds(drc->drc_ds, "finish receiving",
tx, "snap=%s", drc->drc_tosnap);
drc->drc_ds->ds_objset->os_raw_receive = B_FALSE;
- if (!drc->drc_newfs) {
+ if (drc->drc_heal) {
+ if (drc->drc_keynvl != NULL) {
+ nvlist_free(drc->drc_keynvl);
+ drc->drc_keynvl = NULL;
+ }
+ } else if (!drc->drc_newfs) {
dsl_dataset_t *origin_head;
VERIFY0(dsl_dataset_hold(dp, drc->drc_tofs, FTAG,
dsl_dataset_clone_swap_sync_impl(drc->drc_ds,
origin_head, tx);
+ /*
+ * The objset was evicted by dsl_dataset_clone_swap_sync_impl,
+ * so drc_os is no longer valid.
+ */
+ drc->drc_os = NULL;
+
dsl_dataset_snapshot_sync_impl(origin_head,
drc->drc_tosnap, tx);
dsl_dataset_phys(origin_head)->ds_flags &=
~DS_FLAG_INCONSISTENT;
- drc->drc_newsnapobj =
+ newsnapobj =
dsl_dataset_phys(origin_head)->ds_prev_snap_obj;
dsl_dataset_rele(origin_head, FTAG);
(void) zap_remove(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_REDACT_BOOKMARK_SNAPS, tx);
}
- drc->drc_newsnapobj =
+ newsnapobj =
dsl_dataset_phys(drc->drc_ds)->ds_prev_snap_obj;
}
* tunable is set, in which case we will leave the newly-generated
* value.
*/
- if (drc->drc_raw && drc->drc_ivset_guid != 0) {
- dmu_object_zapify(dp->dp_meta_objset, drc->drc_newsnapobj,
+ if (!drc->drc_heal && drc->drc_raw && drc->drc_ivset_guid != 0) {
+ dmu_object_zapify(dp->dp_meta_objset, newsnapobj,
DMU_OT_DSL_DATASET, tx);
- VERIFY0(zap_update(dp->dp_meta_objset, drc->drc_newsnapobj,
+ VERIFY0(zap_update(dp->dp_meta_objset, newsnapobj,
DS_FIELD_IVSET_GUID, sizeof (uint64_t), 1,
&drc->drc_ivset_guid, tx));
}
- zvol_create_minors(dp->dp_spa, drc->drc_tofs, B_TRUE);
-
/*
* Release the hold from dmu_recv_begin. This must be done before
* we return to open context, so that when we free the dataset's dnode
drc->drc_ds = NULL;
}
-static int
-add_ds_to_guidmap(const char *name, avl_tree_t *guid_map, uint64_t snapobj,
- boolean_t raw)
-{
- dsl_pool_t *dp;
- dsl_dataset_t *snapds;
- guid_map_entry_t *gmep;
- objset_t *os;
- ds_hold_flags_t dsflags = (raw) ? 0 : DS_HOLD_FLAG_DECRYPT;
- int err;
-
- ASSERT(guid_map != NULL);
-
- err = dsl_pool_hold(name, FTAG, &dp);
- if (err != 0)
- return (err);
- gmep = kmem_alloc(sizeof (*gmep), KM_SLEEP);
- err = dsl_dataset_own_obj(dp, snapobj, dsflags, gmep, &snapds);
-
- if (err == 0) {
- /*
- * If this is a deduplicated raw send stream, we need
- * to make sure that we can still read raw blocks from
- * earlier datasets in the stream, so we set the
- * os_raw_receive flag now.
- */
- if (raw) {
- err = dmu_objset_from_ds(snapds, &os);
- if (err != 0) {
- dsl_dataset_disown(snapds, dsflags, FTAG);
- dsl_pool_rele(dp, FTAG);
- kmem_free(gmep, sizeof (*gmep));
- return (err);
- }
- os->os_raw_receive = B_TRUE;
- }
-
- gmep->raw = raw;
- gmep->guid = dsl_dataset_phys(snapds)->ds_guid;
- gmep->gme_ds = snapds;
- avl_add(guid_map, gmep);
- } else {
- kmem_free(gmep, sizeof (*gmep));
- }
-
- dsl_pool_rele(dp, FTAG);
- return (err);
-}
-
static int dmu_recv_end_modified_blocks = 3;
static int
if (error != 0) {
dmu_recv_cleanup_ds(drc);
nvlist_free(drc->drc_keynvl);
- } else if (drc->drc_guid_to_ds_map != NULL) {
- (void) add_ds_to_guidmap(drc->drc_tofs, drc->drc_guid_to_ds_map,
- drc->drc_newsnapobj, drc->drc_raw);
+ } else if (!drc->drc_heal) {
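+ /*
+ * On success, create any zvol minors needed for the received
+ * dataset and the new snapshot.
+ */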
+ if (drc->drc_newfs) {
+ zvol_create_minor(drc->drc_tofs);
+ }
+ char *snapname = kmem_asprintf("%s@%s",
+ drc->drc_tofs, drc->drc_tosnap);
+ zvol_create_minor(snapname);
+ kmem_strfree(snapname);
}
return (error);
}
os->os_dsl_dataset->ds_owner == dmu_recv_tag);
}
-#if defined(_KERNEL)
-module_param(zfs_recv_queue_length, int, 0644);
-MODULE_PARM_DESC(zfs_recv_queue_length, "Maximum receive queue length");
+ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_length, UINT, ZMOD_RW,
+ "Maximum receive queue length");
-module_param(zfs_recv_queue_ff, int, 0644);
-MODULE_PARM_DESC(zfs_recv_queue_ff, "Receive queue fill fraction");
-#endif
+ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, queue_ff, UINT, ZMOD_RW,
+ "Receive queue fill fraction");
+
+ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, write_batch_size, UINT, ZMOD_RW,
+ "Maximum amount of writes to batch into one transaction");
+
+ZFS_MODULE_PARAM(zfs_recv, zfs_recv_, best_effort_corrective, INT, ZMOD_RW,
+ "Ignore errors during corrective receive");
+/* END CSTYLED */