* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
+ * Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
*/
#include <sys/dmu.h>
uint64_t fromguid, uint64_t featureflags)
{
uint64_t val;
+ uint64_t children;
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
boolean_t encrypted = ds->ds_dir->dd_crypto_obj != 0;
if (error != ENOENT)
return (error == 0 ? EEXIST : error);
+ /* must not have children if receiving a non-filesystem (e.g. ZVOL) stream */
+ error = zap_count(dp->dp_meta_objset,
+ dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &children);
+ if (error != 0)
+ return (error);
+ if (drba->drba_cookie->drc_drrb->drr_type != DMU_OST_ZFS &&
+ children > 0)
+ return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
+
/*
* Check snapshot limit before receiving. We'll recheck again at the
* end, but might as well abort before receiving if we're already over
} else if (error == ENOENT) {
/* target fs does not exist; must be a full backup or clone */
char buf[ZFS_MAX_DATASET_NAME_LEN];
+ objset_t *os;
/*
* If it's a non-clone incremental, we are missing the
return (error);
}
+ /* can't recv below anything but filesystems (e.g. no ZVOLs) */
+ error = dmu_objset_from_ds(ds, &os);
+ if (error != 0) {
+ dsl_dataset_rele_flags(ds, dsflags, FTAG);
+ return (error);
+ }
+ if (dmu_objset_type(os) != DMU_OST_ZFS) {
+ dsl_dataset_rele_flags(ds, dsflags, FTAG);
+ return (SET_ERROR(ZFS_ERR_WRONG_PARENT));
+ }
+
if (drba->drba_origin != NULL) {
dsl_dataset_t *origin;
dsl_dataset_rele_flags(origin,
dsflags, FTAG);
}
+
dsl_dataset_rele_flags(ds, dsflags, FTAG);
error = 0;
}
if (data != NULL) {
dmu_buf_t *db;
+ dnode_t *dn;
uint32_t flags = DMU_READ_NO_PREFETCH;
if (rwa->raw)
flags |= DMU_READ_NO_DECRYPT;
- VERIFY0(dmu_bonus_hold_impl(rwa->os, drro->drr_object,
- FTAG, flags, &db));
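+ /* hold the dnode first, then take the bonus buffer through it */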
+ VERIFY0(dnode_hold(rwa->os, drro->drr_object, FTAG, &dn));
+ VERIFY0(dmu_bonus_hold_by_dnode(dn, FTAG, &db, flags));
+
dmu_buf_will_dirty(db, tx);
ASSERT3U(db->db_size, >=, drro->drr_bonuslen);
DRR_OBJECT_PAYLOAD_SIZE(drro));
}
dmu_buf_rele(db, FTAG);
+ dnode_rele(dn, FTAG);
}
dmu_tx_commit(tx);
}
VERIFY0(dnode_hold(rwa->os, drrw->drr_object, FTAG, &dn));
- dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
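+ /* the assignment can fail; drop the dnode hold and commit the tx before returning */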
+ err = dmu_assign_arcbuf_by_dnode(dn, drrw->drr_offset, abuf, tx);
+ if (err != 0) {
+ dnode_rele(dn, FTAG);
+ dmu_tx_commit(tx);
+ return (err);
+ }
dnode_rele(dn, FTAG);
/*
ra->rrd->payload_size = len;
ra->rrd->bytes_read = ra->bytes_read;
}
+ } else {
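+ /* no payload was read, so the caller must not have passed a buffer */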
+ ASSERT3P(buf, ==, NULL);
}
ra->prev_cksum = ra->cksum;
{
struct drr_object *drro = &ra->rrd->header.drr_u.drr_object;
uint32_t size = DRR_OBJECT_PAYLOAD_SIZE(drro);
- void *buf = kmem_zalloc(size, KM_SLEEP);
+ void *buf = NULL;
dmu_object_info_t doi;
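+ /*
+ * The object record may carry no payload; only allocate a buffer
+ * when it does (kmem_free(NULL, 0) in the error path is a no-op).
+ */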
+ if (size != 0)
+ buf = kmem_zalloc(size, KM_SLEEP);
+
err = receive_read_payload_and_next_header(ra, size, buf);
if (err != 0) {
kmem_free(buf, size);