From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Tom Caputi <tcaputi@datto.com>
Date: Thu, 28 Jun 2018 17:55:11 -0400
Subject: [PATCH] Fix 'zfs recv' of non large_dnode send streams

Currently, there is a bug where older send streams without the
DMU_BACKUP_FEATURE_LARGE_DNODE flag are not handled correctly.
The code in receive_object() fails to handle cases where
drro->drr_dn_slots is set to 0, which is always the case when the
sending code does not support this feature flag. This patch fixes
the issue by ensuring that a value of 0 is treated as
DNODE_MIN_SLOTS.
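
Illustration only, not part of the applied diff: the fix amounts to
normalizing a drr_dn_slots value of 0 to DNODE_MIN_SLOTS before it is
validated or shifted into a dnode size. A minimal standalone sketch of
that idiom (DNODE_MIN_SLOTS is defined locally here; upstream it comes
from sys/dnode.h and evaluates to 1):

    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in; upstream this is DNODE_MIN_SIZE >> DNODE_SHIFT == 1. */
    #define DNODE_MIN_SLOTS 1

    /* Older senders leave drr_dn_slots at 0; treat that as the minimum. */
    static uint8_t
    effective_dn_slots(uint8_t drr_dn_slots)
    {
        return (drr_dn_slots != 0 ? drr_dn_slots : DNODE_MIN_SLOTS);
    }

    int
    main(void)
    {
        /* A legacy stream's 0 maps to 1; explicit values pass through. */
        printf("%u %u\n", (unsigned)effective_dn_slots(0),
            (unsigned)effective_dn_slots(2));
        return (0);
    }

The patched receive_object() applies the same normalization once, up
front, so every later comparison and shift sees a sane slot count.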

Tested-by: DHE <git@dehacked.net>
Reviewed-by: Brian Behlendorf <behlendorf1@llnl.gov>
Signed-off-by: Tom Caputi <tcaputi@datto.com>
Closes #7617
Closes #7662

Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com>
---
 module/zfs/dmu_object.c |  3 +++
 module/zfs/dmu_send.c   | 33 +++++++++++++++++++++++++++------
 2 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c
index 1fc71d10..40c25362 100644
--- a/module/zfs/dmu_object.c
+++ b/module/zfs/dmu_object.c
@@ -261,6 +261,9 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
 	int dn_slots = dnodesize >> DNODE_SHIFT;
 	int err;
 
+	if (dn_slots == 0)
+		dn_slots = DNODE_MIN_SLOTS;
+
 	if (object == DMU_META_DNODE_OBJECT)
 		return (SET_ERROR(EBADF));
 
diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c
index 1de0f316..13aae960 100644
--- a/module/zfs/dmu_send.c
+++ b/module/zfs/dmu_send.c
@@ -2139,6 +2139,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	dmu_tx_t *tx;
 	uint64_t object;
 	int err;
+	uint8_t dn_slots = drro->drr_dn_slots != 0 ?
+	    drro->drr_dn_slots : DNODE_MIN_SLOTS;
 
 	if (drro->drr_type == DMU_OT_NONE ||
 	    !DMU_OT_IS_VALID(drro->drr_type) ||
@@ -2150,7 +2152,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	    drro->drr_blksz > spa_maxblocksize(dmu_objset_spa(rwa->os)) ||
 	    drro->drr_bonuslen >
 	    DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(rwa->os))) ||
-	    drro->drr_dn_slots >
+	    dn_slots >
 	    (spa_maxdnodesize(dmu_objset_spa(rwa->os)) >> DNODE_SHIFT)) {
 		return (SET_ERROR(EINVAL));
 	}
@@ -2177,12 +2179,31 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 
 		if (drro->drr_blksz != doi.doi_data_block_size ||
 		    nblkptr < doi.doi_nblkptr ||
-		    drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+		    dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
 			err = dmu_free_long_range(rwa->os, drro->drr_object,
 			    0, DMU_OBJECT_END);
 			if (err != 0)
 				return (SET_ERROR(EINVAL));
 		}
+
+		/*
+		 * The dmu does not currently support decreasing nlevels
+		 * on an object. For non-raw sends, this does not matter
+		 * and the new object can just use the previous one's nlevels.
+		 * For raw sends, however, the structure of the received dnode
+		 * (including nlevels) must match that of the send side.
+		 * Therefore, instead of using dmu_object_reclaim(), we must
+		 * free the object completely and call dmu_object_claim_dnsize()
+		 * instead.
+		 */
+		if (dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) {
+			err = dmu_free_long_object(rwa->os, drro->drr_object);
+			if (err != 0)
+				return (SET_ERROR(EINVAL));
+
+			txg_wait_synced(dmu_objset_pool(rwa->os), 0);
+			object = DMU_NEW_OBJECT;
+		}
 	} else if (err == EEXIST) {
 		/*
 		 * The object requested is currently an interior slot of a
@@ -2204,9 +2225,9 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 	 * another object from the previous snapshot. We must free
 	 * these objects before we attempt to allocate the new dnode.
 	 */
-	if (drro->drr_dn_slots > 1) {
+	if (dn_slots > 1) {
 		for (uint64_t slot = drro->drr_object + 1;
-		    slot < drro->drr_object + drro->drr_dn_slots;
+		    slot < drro->drr_object + dn_slots;
 		    slot++) {
 			dmu_object_info_t slot_doi;
 
@@ -2238,7 +2259,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_claim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	} else if (drro->drr_type != doi.doi_type ||
 	    drro->drr_blksz != doi.doi_data_block_size ||
 	    drro->drr_bonustype != doi.doi_bonus_type ||
@@ -2247,7 +2268,7 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro,
 		err = dmu_object_reclaim_dnsize(rwa->os, drro->drr_object,
 		    drro->drr_type, drro->drr_blksz,
 		    drro->drr_bonustype, drro->drr_bonuslen,
-		    drro->drr_dn_slots << DNODE_SHIFT, tx);
+		    dn_slots << DNODE_SHIFT, tx);
 	}
 	if (err != 0) {
 		dmu_tx_commit(tx);