]>
Commit | Line | Data |
---|---|---|
a010b409 SI |
1 | From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001 |
2 | From: Tim Chase <tim@chase2k.com> | |
3 | Date: Mon, 27 Aug 2018 10:28:32 -0400 | |
4 | Subject: [PATCH] Fix problems receiving reallocated dnodes | |
5 | ||
6 | This is a port of 047116ac - Raw sends must be able to decrease nlevels, | |
7 | to the zfs-0.7-stable branch. It includes the various fixes to the | |
8 | problem of receiving incremental streams which include reallocated dnodes | |
9 | in which the number of dnode slots has changed but excludes the parts | |
10 | which are related to raw streams. | |
11 | ||
12 | From 047116ac: | |
13 | ||
14 | Currently, when a raw zfs send file includes a | |
15 | DRR_OBJECT record that would decrease the number of | |
16 | levels of an existing object, the object is reallocated | |
17 | with dmu_object_reclaim() which creates the new dnode | |
18 | using the old object's nlevels. For non-raw sends this | |
19 | doesn't really matter, but raw sends require that | |
20 | nlevels on the receive side match that of the send | |
21 | side so that the checksum-of-MAC tree can be properly | |
22 | maintained. This patch corrects the issue by freeing | |
23 | the object completely before allocating it again in | |
24 | this case. | |
25 | ||
26 | This patch also corrects several issues with | |
27 | dnode_hold_impl() and related functions that prevented | |
28 | dnodes (particularly multi-slot dnodes) from being | |
29 | reallocated properly due to the fact that existing | |
30 | dnodes were not being fully cleaned up when they | |
31 | were freed. | |
32 | ||
33 | This patch adds a test to make sure that zfs recv | |
34 | functions properly with incremental streams containing | |
35 | dnodes of different sizes. | |
36 | ||
37 | This also includes a one-liner fix from loli10K to fix a test failure: | |
38 | https://github.com/zfsonlinux/zfs/pull/7792#discussion_r212769264 | |
39 | ||
40 | Authored-by: Tom Caputi <tcaputi@datto.com> | |
41 | Reviewed by: Matthew Ahrens <mahrens@delphix.com> | |
42 | Reviewed-by: Jorgen Lundman <lundman@lundman.net> | |
43 | Signed-off-by: Tom Caputi <tcaputi@datto.com> | |
44 | Signed-off-by: Brian Behlendorf <behlendorf1@llnl.gov> | |
45 | Signed-off-by: Tim Chase <tim@chase2k.com> | |
46 | Ported-by: Tim Chase <tim@chase2k.com> | |
47 | ||
48 | Closes #6821 | |
49 | Closes #6864 | |
50 | ||
51 | NOTE: This is the first of the port of 3 related patches to the | |
52 | zfs-0.7-release branch of ZoL. The other two patches should immediately | |
53 | follow this one. | |
54 | ||
55 | Signed-off-by: Stoiko Ivanov <s.ivanov@proxmox.com> | |
56 | --- | |
57 | cmd/ztest/ztest.c | 25 +++++- | |
58 | include/sys/dnode.h | 6 ++ | |
59 | lib/libzfs/libzfs_sendrecv.c | 1 + | |
60 | module/zfs/dmu_object.c | 1 - | |
61 | module/zfs/dmu_send.c | 51 +++++++++-- | |
62 | module/zfs/dnode.c | 84 +++++++++++++++++-- | |
63 | module/zfs/dnode_sync.c | 2 + | |
64 | tests/runfiles/linux.run | 2 +- | |
65 | tests/zfs-tests/tests/functional/rsend/Makefile.am | 3 +- | |
66 | .../functional/rsend/send_realloc_dnode_size.ksh | 98 ++++++++++++++++++++++ | |
67 | 10 files changed, 258 insertions(+), 15 deletions(-) | |
68 | create mode 100644 tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh | |
69 | ||
70 | diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c | |
71 | index 1a320b03..a410eeef 100644 | |
72 | --- a/cmd/ztest/ztest.c | |
73 | +++ b/cmd/ztest/ztest.c | |
74 | @@ -197,7 +197,8 @@ extern uint64_t metaslab_gang_bang; | |
75 | extern uint64_t metaslab_df_alloc_threshold; | |
76 | extern int metaslab_preload_limit; | |
77 | extern boolean_t zfs_compressed_arc_enabled; | |
78 | -extern int zfs_abd_scatter_enabled; | |
79 | +extern int zfs_abd_scatter_enabled; | |
80 | +extern int dmu_object_alloc_chunk_shift; | |
81 | ||
82 | static ztest_shared_opts_t *ztest_shared_opts; | |
83 | static ztest_shared_opts_t ztest_opts; | |
84 | @@ -310,6 +311,7 @@ static ztest_shared_callstate_t *ztest_shared_callstate; | |
85 | ztest_func_t ztest_dmu_read_write; | |
86 | ztest_func_t ztest_dmu_write_parallel; | |
87 | ztest_func_t ztest_dmu_object_alloc_free; | |
88 | +ztest_func_t ztest_dmu_object_next_chunk; | |
89 | ztest_func_t ztest_dmu_commit_callbacks; | |
90 | ztest_func_t ztest_zap; | |
91 | ztest_func_t ztest_zap_parallel; | |
92 | @@ -357,6 +359,7 @@ ztest_info_t ztest_info[] = { | |
93 | ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always), | |
94 | ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always), | |
95 | ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always), | |
96 | + ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes), | |
97 | ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always), | |
98 | ZTI_INIT(ztest_zap, 30, &zopt_always), | |
99 | ZTI_INIT(ztest_zap_parallel, 100, &zopt_always), | |
100 | @@ -3927,6 +3930,26 @@ ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id) | |
101 | umem_free(od, size); | |
102 | } | |
103 | ||
104 | +/* | |
105 | + * Rewind the global allocator to verify object allocation backfilling. | |
106 | + */ | |
107 | +void | |
108 | +ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id) | |
109 | +{ | |
110 | + objset_t *os = zd->zd_os; | |
111 | + int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift; | |
112 | + uint64_t object; | |
113 | + | |
114 | + /* | |
115 | + * Rewind the global allocator randomly back to a lower object number | |
116 | + * to force backfilling and reclamation of recently freed dnodes. | |
117 | + */ | |
118 | + mutex_enter(&os->os_obj_lock); | |
119 | + object = ztest_random(os->os_obj_next_chunk); | |
120 | + os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk); | |
121 | + mutex_exit(&os->os_obj_lock); | |
122 | +} | |
123 | + | |
124 | #undef OD_ARRAY_SIZE | |
125 | #define OD_ARRAY_SIZE 2 | |
126 | ||
127 | diff --git a/include/sys/dnode.h b/include/sys/dnode.h | |
128 | index c7efe559..ea7defe1 100644 | |
129 | --- a/include/sys/dnode.h | |
130 | +++ b/include/sys/dnode.h | |
131 | @@ -360,6 +360,7 @@ int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off, | |
132 | int minlvl, uint64_t blkfill, uint64_t txg); | |
133 | void dnode_evict_dbufs(dnode_t *dn); | |
134 | void dnode_evict_bonus(dnode_t *dn); | |
135 | +void dnode_free_interior_slots(dnode_t *dn); | |
136 | ||
137 | #define DNODE_IS_CACHEABLE(_dn) \ | |
138 | ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \ | |
139 | @@ -454,6 +455,11 @@ typedef struct dnode_stats { | |
140 | */ | |
141 | kstat_named_t dnode_hold_free_txg; | |
142 | /* | |
143 | + * Number of times dnode_free_interior_slots() needed to retry | |
144 | + * acquiring a slot zrl lock due to contention. | |
145 | + */ | |
146 | + kstat_named_t dnode_free_interior_lock_retry; | |
147 | + /* | |
148 | * Number of new dnodes allocated by dnode_allocate(). | |
149 | */ | |
150 | kstat_named_t dnode_allocate; | |
151 | diff --git a/lib/libzfs/libzfs_sendrecv.c b/lib/libzfs/libzfs_sendrecv.c | |
152 | index c5acd21a..cadf16cc 100644 | |
153 | --- a/lib/libzfs/libzfs_sendrecv.c | |
154 | +++ b/lib/libzfs/libzfs_sendrecv.c | |
155 | @@ -3577,6 +3577,7 @@ zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap, | |
156 | } | |
157 | ||
158 | newfs = B_TRUE; | |
159 | + *cp = '/'; | |
160 | } | |
161 | ||
162 | if (flags->verbose) { | |
163 | diff --git a/module/zfs/dmu_object.c b/module/zfs/dmu_object.c | |
164 | index e7412b75..f53da407 100644 | |
165 | --- a/module/zfs/dmu_object.c | |
166 | +++ b/module/zfs/dmu_object.c | |
167 | @@ -275,7 +275,6 @@ dmu_object_reclaim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot, | |
168 | return (err); | |
169 | } | |
170 | ||
171 | - | |
172 | int | |
173 | dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx) | |
174 | { | |
175 | diff --git a/module/zfs/dmu_send.c b/module/zfs/dmu_send.c | |
176 | index cdbc1cd1..148b5ff8 100644 | |
177 | --- a/module/zfs/dmu_send.c | |
178 | +++ b/module/zfs/dmu_send.c | |
179 | @@ -2156,10 +2156,8 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, | |
180 | } | |
181 | ||
182 | err = dmu_object_info(rwa->os, drro->drr_object, &doi); | |
183 | - | |
184 | - if (err != 0 && err != ENOENT) | |
185 | + if (err != 0 && err != ENOENT && err != EEXIST) | |
186 | return (SET_ERROR(EINVAL)); | |
187 | - object = err == 0 ? drro->drr_object : DMU_NEW_OBJECT; | |
188 | ||
189 | if (drro->drr_object > rwa->max_object) | |
190 | rwa->max_object = drro->drr_object; | |
191 | @@ -2175,13 +2173,56 @@ receive_object(struct receive_writer_arg *rwa, struct drr_object *drro, | |
192 | nblkptr = deduce_nblkptr(drro->drr_bonustype, | |
193 | drro->drr_bonuslen); | |
194 | ||
195 | + object = drro->drr_object; | |
196 | + | |
197 | if (drro->drr_blksz != doi.doi_data_block_size || | |
198 | - nblkptr < doi.doi_nblkptr) { | |
199 | + nblkptr < doi.doi_nblkptr || | |
200 | + drro->drr_dn_slots != doi.doi_dnodesize >> DNODE_SHIFT) { | |
201 | err = dmu_free_long_range(rwa->os, drro->drr_object, | |
202 | 0, DMU_OBJECT_END); | |
203 | if (err != 0) | |
204 | return (SET_ERROR(EINVAL)); | |
205 | } | |
206 | + } else if (err == EEXIST) { | |
207 | + /* | |
208 | + * The object requested is currently an interior slot of a | |
209 | + * multi-slot dnode. This will be resolved when the next txg | |
210 | + * is synced out, since the send stream will have told us | |
211 | + * to free this slot when we freed the associated dnode | |
212 | + * earlier in the stream. | |
213 | + */ | |
214 | + txg_wait_synced(dmu_objset_pool(rwa->os), 0); | |
215 | + object = drro->drr_object; | |
216 | + } else { | |
217 | + /* object is free and we are about to allocate a new one */ | |
218 | + object = DMU_NEW_OBJECT; | |
219 | + } | |
220 | + | |
221 | + /* | |
222 | + * If this is a multi-slot dnode there is a chance that this | |
223 | + * object will expand into a slot that is already used by | |
224 | + * another object from the previous snapshot. We must free | |
225 | + * these objects before we attempt to allocate the new dnode. | |
226 | + */ | |
227 | + if (drro->drr_dn_slots > 1) { | |
228 | + for (uint64_t slot = drro->drr_object + 1; | |
229 | + slot < drro->drr_object + drro->drr_dn_slots; | |
230 | + slot++) { | |
231 | + dmu_object_info_t slot_doi; | |
232 | + | |
233 | + err = dmu_object_info(rwa->os, slot, &slot_doi); | |
234 | + if (err == ENOENT || err == EEXIST) | |
235 | + continue; | |
236 | + else if (err != 0) | |
237 | + return (err); | |
238 | + | |
239 | + err = dmu_free_long_object(rwa->os, slot); | |
240 | + | |
241 | + if (err != 0) | |
242 | + return (err); | |
243 | + } | |
244 | + | |
245 | + txg_wait_synced(dmu_objset_pool(rwa->os), 0); | |
246 | } | |
247 | ||
248 | tx = dmu_tx_create(rwa->os); | |
249 | @@ -2732,7 +2773,7 @@ receive_read_record(struct receive_arg *ra) | |
250 | * See receive_read_prefetch for an explanation why we're | |
251 | * storing this object in the ignore_obj_list. | |
252 | */ | |
253 | - if (err == ENOENT || | |
254 | + if (err == ENOENT || err == EEXIST || | |
255 | (err == 0 && doi.doi_data_block_size != drro->drr_blksz)) { | |
256 | objlist_insert(&ra->ignore_objlist, drro->drr_object); | |
257 | err = 0; | |
258 | diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c | |
259 | index e05a4d0a..df6a4872 100644 | |
260 | --- a/module/zfs/dnode.c | |
261 | +++ b/module/zfs/dnode.c | |
262 | @@ -55,6 +55,7 @@ dnode_stats_t dnode_stats = { | |
263 | { "dnode_hold_free_overflow", KSTAT_DATA_UINT64 }, | |
264 | { "dnode_hold_free_refcount", KSTAT_DATA_UINT64 }, | |
265 | { "dnode_hold_free_txg", KSTAT_DATA_UINT64 }, | |
266 | + { "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 }, | |
267 | { "dnode_allocate", KSTAT_DATA_UINT64 }, | |
268 | { "dnode_reallocate", KSTAT_DATA_UINT64 }, | |
269 | { "dnode_buf_evict", KSTAT_DATA_UINT64 }, | |
270 | @@ -516,7 +517,8 @@ dnode_destroy(dnode_t *dn) | |
271 | mutex_exit(&os->os_lock); | |
272 | ||
273 | /* the dnode can no longer move, so we can release the handle */ | |
274 | - zrl_remove(&dn->dn_handle->dnh_zrlock); | |
275 | + if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock)) | |
276 | + zrl_remove(&dn->dn_handle->dnh_zrlock); | |
277 | ||
278 | dn->dn_allocated_txg = 0; | |
279 | dn->dn_free_txg = 0; | |
280 | @@ -662,6 +664,8 @@ dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, | |
281 | DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)))); | |
282 | ||
283 | dn_slots = dn_slots > 0 ? dn_slots : DNODE_MIN_SLOTS; | |
284 | + | |
285 | + dnode_free_interior_slots(dn); | |
286 | DNODE_STAT_BUMP(dnode_reallocate); | |
287 | ||
288 | /* clean up any unreferenced dbufs */ | |
289 | @@ -1062,19 +1066,73 @@ dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr) | |
290 | } | |
291 | ||
292 | static boolean_t | |
293 | -dnode_check_slots(dnode_children_t *children, int idx, int slots, void *ptr) | |
294 | +dnode_check_slots_free(dnode_children_t *children, int idx, int slots) | |
295 | { | |
296 | ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
297 | ||
298 | for (int i = idx; i < idx + slots; i++) { | |
299 | dnode_handle_t *dnh = &children->dnc_children[i]; | |
300 | - if (dnh->dnh_dnode != ptr) | |
301 | + dnode_t *dn = dnh->dnh_dnode; | |
302 | + | |
303 | + if (dn == DN_SLOT_FREE) { | |
304 | + continue; | |
305 | + } else if (DN_SLOT_IS_PTR(dn)) { | |
306 | + mutex_enter(&dn->dn_mtx); | |
307 | + dmu_object_type_t type = dn->dn_type; | |
308 | + mutex_exit(&dn->dn_mtx); | |
309 | + | |
310 | + if (type != DMU_OT_NONE) | |
311 | + return (B_FALSE); | |
312 | + | |
313 | + continue; | |
314 | + } else { | |
315 | return (B_FALSE); | |
316 | + } | |
317 | + | |
318 | + return (B_FALSE); | |
319 | } | |
320 | ||
321 | return (B_TRUE); | |
322 | } | |
323 | ||
324 | +static void | |
325 | +dnode_reclaim_slots(dnode_children_t *children, int idx, int slots) | |
326 | +{ | |
327 | + ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
328 | + | |
329 | + for (int i = idx; i < idx + slots; i++) { | |
330 | + dnode_handle_t *dnh = &children->dnc_children[i]; | |
331 | + | |
332 | + ASSERT(zrl_is_locked(&dnh->dnh_zrlock)); | |
333 | + | |
334 | + if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) { | |
335 | + ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE); | |
336 | + dnode_destroy(dnh->dnh_dnode); | |
337 | + dnh->dnh_dnode = DN_SLOT_FREE; | |
338 | + } | |
339 | + } | |
340 | +} | |
341 | + | |
342 | +void | |
343 | +dnode_free_interior_slots(dnode_t *dn) | |
344 | +{ | |
345 | + dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db); | |
346 | + int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT; | |
347 | + int idx = (dn->dn_object & (epb - 1)) + 1; | |
348 | + int slots = dn->dn_num_slots - 1; | |
349 | + | |
350 | + if (slots == 0) | |
351 | + return; | |
352 | + | |
353 | + ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK); | |
354 | + | |
355 | + while (!dnode_slots_tryenter(children, idx, slots)) | |
356 | + DNODE_STAT_BUMP(dnode_free_interior_lock_retry); | |
357 | + | |
358 | + dnode_set_slots(children, idx, slots, DN_SLOT_FREE); | |
359 | + dnode_slots_rele(children, idx, slots); | |
360 | +} | |
361 | + | |
362 | void | |
363 | dnode_special_close(dnode_handle_t *dnh) | |
364 | { | |
365 | @@ -1355,7 +1413,7 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, | |
366 | while (dn == DN_SLOT_UNINIT) { | |
367 | dnode_slots_hold(dnc, idx, slots); | |
368 | ||
369 | - if (!dnode_check_slots(dnc, idx, slots, DN_SLOT_FREE)) { | |
370 | + if (!dnode_check_slots_free(dnc, idx, slots)) { | |
371 | DNODE_STAT_BUMP(dnode_hold_free_misses); | |
372 | dnode_slots_rele(dnc, idx, slots); | |
373 | dbuf_rele(db, FTAG); | |
374 | @@ -1368,15 +1426,29 @@ dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots, | |
375 | continue; | |
376 | } | |
377 | ||
378 | - if (!dnode_check_slots(dnc, idx, slots, DN_SLOT_FREE)) { | |
379 | + if (!dnode_check_slots_free(dnc, idx, slots)) { | |
380 | DNODE_STAT_BUMP(dnode_hold_free_lock_misses); | |
381 | dnode_slots_rele(dnc, idx, slots); | |
382 | dbuf_rele(db, FTAG); | |
383 | return (SET_ERROR(ENOSPC)); | |
384 | } | |
385 | ||
386 | + /* | |
387 | + * Allocated but otherwise free dnodes which would | |
388 | + * be in the interior of a multi-slot dnodes need | |
389 | + * to be freed. Single slot dnodes can be safely | |
390 | + * re-purposed as a performance optimization. | |
391 | + */ | |
392 | + if (slots > 1) | |
393 | + dnode_reclaim_slots(dnc, idx + 1, slots - 1); | |
394 | + | |
395 | dnh = &dnc->dnc_children[idx]; | |
396 | - dn = dnode_create(os, dn_block + idx, db, object, dnh); | |
397 | + if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) { | |
398 | + dn = dnh->dnh_dnode; | |
399 | + } else { | |
400 | + dn = dnode_create(os, dn_block + idx, db, | |
401 | + object, dnh); | |
402 | + } | |
403 | } | |
404 | ||
405 | mutex_enter(&dn->dn_mtx); | |
406 | diff --git a/module/zfs/dnode_sync.c b/module/zfs/dnode_sync.c | |
407 | index 742d962b..8d65e385 100644 | |
408 | --- a/module/zfs/dnode_sync.c | |
409 | +++ b/module/zfs/dnode_sync.c | |
410 | @@ -533,6 +533,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx) | |
411 | if (dn->dn_allocated_txg != dn->dn_free_txg) | |
412 | dmu_buf_will_dirty(&dn->dn_dbuf->db, tx); | |
413 | bzero(dn->dn_phys, sizeof (dnode_phys_t) * dn->dn_num_slots); | |
414 | + dnode_free_interior_slots(dn); | |
415 | ||
416 | mutex_enter(&dn->dn_mtx); | |
417 | dn->dn_type = DMU_OT_NONE; | |
418 | @@ -540,6 +541,7 @@ dnode_sync_free(dnode_t *dn, dmu_tx_t *tx) | |
419 | dn->dn_allocated_txg = 0; | |
420 | dn->dn_free_txg = 0; | |
421 | dn->dn_have_spill = B_FALSE; | |
422 | + dn->dn_num_slots = 1; | |
423 | mutex_exit(&dn->dn_mtx); | |
424 | ||
425 | ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); | |
426 | diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run | |
427 | index 69e9eb26..d8fe6f3a 100644 | |
428 | --- a/tests/runfiles/linux.run | |
429 | +++ b/tests/runfiles/linux.run | |
430 | @@ -605,7 +605,7 @@ tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', | |
431 | 'send-c_lz4_disabled', 'send-c_recv_lz4_disabled', | |
432 | 'send-c_mixed_compression', 'send-c_stream_size_estimate', 'send-cD', | |
433 | 'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize', | |
434 | - 'send-c_recv_dedup', 'send_freeobjects'] | |
435 | + 'send-c_recv_dedup', 'send_freeobjects', 'send_realloc_dnode_size'] | |
436 | tags = ['functional', 'rsend'] | |
437 | ||
438 | [tests/functional/scrub_mirror] | |
439 | diff --git a/tests/zfs-tests/tests/functional/rsend/Makefile.am b/tests/zfs-tests/tests/functional/rsend/Makefile.am | |
440 | index 6b1aa8b3..a2837d1a 100644 | |
441 | --- a/tests/zfs-tests/tests/functional/rsend/Makefile.am | |
442 | +++ b/tests/zfs-tests/tests/functional/rsend/Makefile.am | |
443 | @@ -36,7 +36,8 @@ dist_pkgdata_SCRIPTS = \ | |
444 | send-c_volume.ksh \ | |
445 | send-c_zstreamdump.ksh \ | |
446 | send-cpL_varied_recsize.ksh \ | |
447 | - send_freeobjects.ksh | |
448 | + send_freeobjects.ksh \ | |
449 | + send_realloc_dnode_size.ksh | |
450 | ||
451 | dist_pkgdata_DATA = \ | |
452 | rsend.cfg \ | |
453 | diff --git a/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh | |
454 | new file mode 100644 | |
455 | index 00000000..20676394 | |
456 | --- /dev/null | |
457 | +++ b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh | |
458 | @@ -0,0 +1,98 @@ | |
459 | +#!/bin/ksh | |
460 | + | |
461 | +# | |
462 | +# This file and its contents are supplied under the terms of the | |
463 | +# Common Development and Distribution License ("CDDL"), version 1.0. | |
464 | +# You may only use this file in accordance with the terms of version | |
465 | +# 1.0 of the CDDL. | |
466 | +# | |
467 | +# A full copy of the text of the CDDL should have accompanied this | |
468 | +# source. A copy of the CDDL is also available via the Internet at | |
469 | +# http://www.illumos.org/license/CDDL. | |
470 | +# | |
471 | + | |
472 | +# | |
473 | +# Copyright (c) 2017 by Lawrence Livermore National Security, LLC. | |
474 | +# | |
475 | + | |
476 | +. $STF_SUITE/include/libtest.shlib | |
477 | +. $STF_SUITE/tests/functional/rsend/rsend.kshlib | |
478 | + | |
479 | +# | |
480 | +# Description: | |
481 | +# Verify incremental receive properly handles objects with changed | |
482 | +# dnode slot count. | |
483 | +# | |
484 | +# Strategy: | |
485 | +# 1. Populate a dataset with 1k byte dnodes and snapshot | |
486 | +# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects | |
487 | +# get recycled numbers and formerly "interior" dnode slots get assigned | |
488 | +# to new objects | |
489 | +# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects | |
490 | +# overlap with recently recycled and formerly "normal" dnode slots get | |
491 | +# assigned to new objects | |
492 | +# 4. Generate initial and incremental streams | |
493 | +# 5. Verify initial and incremental streams can be received | |
494 | +# | |
495 | + | |
496 | +verify_runnable "both" | |
497 | + | |
498 | +log_assert "Verify incremental receive handles objects with changed dnode size" | |
499 | + | |
500 | +function cleanup | |
501 | +{ | |
502 | + rm -f $BACKDIR/fs-dn-legacy | |
503 | + rm -f $BACKDIR/fs-dn-1k | |
504 | + rm -f $BACKDIR/fs-dn-2k | |
505 | + | |
506 | + if datasetexists $POOL/fs ; then | |
507 | + log_must zfs destroy -rR $POOL/fs | |
508 | + fi | |
509 | + | |
510 | + if datasetexists $POOL/newfs ; then | |
511 | + log_must zfs destroy -rR $POOL/newfs | |
512 | + fi | |
513 | +} | |
514 | + | |
515 | +log_onexit cleanup | |
516 | + | |
517 | +# 1. Populate a dataset with 1k byte dnodes and snapshot | |
518 | +log_must zfs create -o dnodesize=1k $POOL/fs | |
519 | +log_must mk_files 200 262144 0 $POOL/fs | |
520 | +log_must zfs snapshot $POOL/fs@a | |
521 | + | |
522 | +# 2. Remove objects, set dnodesize=legacy, and remount dataset so new objects | |
523 | +# get recycled numbers and formerly "interior" dnode slots get assigned | |
524 | +# to new objects | |
525 | +rm /$POOL/fs/* | |
526 | + | |
527 | +log_must zfs unmount $POOL/fs | |
528 | +log_must zfs set dnodesize=legacy $POOL/fs | |
529 | +log_must zfs mount $POOL/fs | |
530 | + | |
531 | +log_must mk_files 200 262144 0 $POOL/fs | |
532 | +log_must zfs snapshot $POOL/fs@b | |
533 | + | |
534 | +# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects | |
535 | +# overlap with recently recycled and formerly "normal" dnode slots get | |
536 | +# assigned to new objects | |
537 | +rm /$POOL/fs/* | |
538 | + | |
539 | +log_must zfs unmount $POOL/fs | |
540 | +log_must zfs set dnodesize=2k $POOL/fs | |
541 | +log_must zfs mount $POOL/fs | |
542 | + | |
543 | +mk_files 200 262144 0 $POOL/fs | |
544 | +log_must zfs snapshot $POOL/fs@c | |
545 | + | |
546 | +# 4. Generate initial and incremental streams | |
547 | +log_must eval "zfs send $POOL/fs@a > $BACKDIR/fs-dn-1k" | |
548 | +log_must eval "zfs send -i $POOL/fs@a $POOL/fs@b > $BACKDIR/fs-dn-legacy" | |
549 | +log_must eval "zfs send -i $POOL/fs@b $POOL/fs@c > $BACKDIR/fs-dn-2k" | |
550 | + | |
551 | +# 5. Verify initial and incremental streams can be received | |
552 | +log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-1k" | |
553 | +log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-legacy" | |
554 | +log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-2k" | |
555 | + | |
556 | +log_pass "Verify incremental receive handles objects with changed dnode size" |