1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
25 */
26
27 #include <sys/zfs_context.h>
28 #include <sys/spa_impl.h>
29 #include <sys/dmu.h>
30 #include <sys/dmu_tx.h>
31 #include <sys/zap.h>
32 #include <sys/vdev_impl.h>
33 #include <sys/metaslab.h>
34 #include <sys/metaslab_impl.h>
35 #include <sys/uberblock_impl.h>
36 #include <sys/txg.h>
37 #include <sys/avl.h>
38 #include <sys/bpobj.h>
39 #include <sys/dsl_pool.h>
40 #include <sys/dsl_synctask.h>
41 #include <sys/dsl_dir.h>
42 #include <sys/arc.h>
43 #include <sys/zfeature.h>
44 #include <sys/vdev_indirect_births.h>
45 #include <sys/vdev_indirect_mapping.h>
46 #include <sys/abd.h>
47 #include <sys/vdev_initialize.h>
48 #include <sys/vdev_trim.h>
49 #include <sys/trace_vdev.h>
50
51 /*
52 * This file contains the necessary logic to remove vdevs from a
53 * storage pool. Currently, the only devices that can be removed
54 * are log, cache, and spare devices; and top level vdevs from a pool
55 * w/o raidz or mirrors. (Note that members of a mirror can be removed
56 * by the detach operation.)
57 *
58 * Log vdevs are removed by evacuating them and then turning the vdev
59 * into a hole vdev while holding spa config locks.
60 *
61 * Top level vdevs are removed and converted into an indirect vdev via
62 * a multi-step process:
63 *
64 * - Disable allocations from this device (spa_vdev_remove_top).
65 *
66 * - From a new thread (spa_vdev_remove_thread), copy data from
67 * the removing vdev to a different vdev. The copy happens in open
68 * context (spa_vdev_copy_impl) and issues a sync task
69 * (vdev_mapping_sync) so the sync thread can update the partial
70 * indirect mappings in core and on disk.
71 *
72 * - If a free happens during a removal, it is freed from the
73 * removing vdev, and if it has already been copied, from the new
74 * location as well (free_from_removing_vdev).
75 *
76 * - After the removal is completed, the copy thread converts the vdev
77 * into an indirect vdev (vdev_remove_complete) before instructing
78 * the sync thread to destroy the space maps and finish the removal
79 * (spa_finish_removal).
80 */
81
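/*
 * Illustrative sketch (hypothetical values, excluded from the build): the
 * end product of a removal is a set of indirect mapping entries like the
 * one below, each recording that a segment which used to live on the
 * removing vdev now lives at a DVA on some other vdev.  These are the
 * same macros the copy and free paths below use.
 */
#if 0
	vdev_indirect_mapping_entry_t *e =
	    kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
	/* old location: offset 0x100000 on the removing vdev */
	DVA_MAPPING_SET_SRC_OFFSET(&e->vime_mapping, 0x100000);
	/* new location: 0x20000 bytes at offset 0x500000 on vdev 5 */
	DVA_SET_VDEV(&e->vime_mapping.vimep_dst, 5);
	DVA_SET_OFFSET(&e->vime_mapping.vimep_dst, 0x500000);
	DVA_SET_ASIZE(&e->vime_mapping.vimep_dst, 0x20000);
#endif
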
82 typedef struct vdev_copy_arg {
83 metaslab_t *vca_msp;
84 uint64_t vca_outstanding_bytes;
85 uint64_t vca_read_error_bytes;
86 uint64_t vca_write_error_bytes;
87 kcondvar_t vca_cv;
88 kmutex_t vca_lock;
89 } vdev_copy_arg_t;
90
91 /*
92 * The maximum amount of memory we can use for outstanding i/o while
93 * doing a device removal. This determines how much i/o we can have
94 * in flight concurrently.
95 */
96 int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
97
98 /*
99 * The largest contiguous segment that we will attempt to allocate when
100 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
101 * there is a performance problem with attempting to allocate large blocks,
102 * consider decreasing this.
103 *
104 * See also the accessor function spa_remove_max_segment().
105 */
106 int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
107
108 /*
109  * Ignore hard IO errors during device removal. When set, if a device
110  * encounters a hard IO error during the removal process, the removal
111  * will not be cancelled. This can result in a normally recoverable
112  * block becoming permanently damaged and is not recommended.
113 */
114 int zfs_removal_ignore_errors = 0;
115
116 /*
117 * Allow a remap segment to span free chunks of at most this size. The main
118 * impact of a larger span is that we will read and write larger, more
119 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
120 * for iops. The value here was chosen to align with
121 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
122 * reads (but there's no reason it has to be the same).
123 *
124 * Additionally, a higher span will have the following relatively minor
125 * effects:
126 * - the mapping will be smaller, since one entry can cover more allocated
127 * segments
128 * - more of the fragmentation in the removing device will be preserved
129 * - we'll do larger allocations, which may fail and fall back on smaller
130 * allocations
131 */
132 int vdev_removal_max_span = 32 * 1024;
133
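/*
 * A minimal sketch (hypothetical offsets, excluded from the build) of how
 * this span is applied by the segment-gathering loop in
 * spa_vdev_copy_impl() below: two allocated 16K segments separated by a
 * 24K hole are grouped into one 56K copy, reading and writing 24K of
 * "unnecessary" data; had the hole been larger than vdev_removal_max_span,
 * the second segment would have started a new chunk.
 */
#if 0
	range_tree_t *segs = range_tree_create(NULL, NULL);
	range_tree_add(segs, 0, 16 * 1024);	/* [0, 16K) allocated */
	uint64_t next_start = 40 * 1024;	/* next segment starts at 40K */
	if (next_start - range_tree_max(segs) <= vdev_removal_max_span)
		range_tree_add(segs, next_start, 16 * 1024);
	/* range_tree_span(segs) is now 56K and is copied as one mapping */
#endif
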
134 /*
135 * This is used by the test suite so that it can ensure that certain
136 * actions happen while in the middle of a removal.
137 */
138 int zfs_removal_suspend_progress = 0;
139
140 #define VDEV_REMOVAL_ZAP_OBJS "lzap"
141
142 static void spa_vdev_remove_thread(void *arg);
143 static int spa_vdev_remove_cancel_impl(spa_t *spa);
144
145 static void
146 spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
147 {
148 VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
149 DMU_POOL_DIRECTORY_OBJECT,
150 DMU_POOL_REMOVING, sizeof (uint64_t),
151 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
152 &spa->spa_removing_phys, tx));
153 }
154
155 static nvlist_t *
156 spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
157 {
158 for (int i = 0; i < count; i++) {
159 uint64_t guid =
160 fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
161
162 if (guid == target_guid)
163 return (nvpp[i]);
164 }
165
166 return (NULL);
167 }
168
169 static void
170 spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
171 nvlist_t *dev_to_remove)
172 {
173 nvlist_t **newdev = NULL;
174
175 if (count > 1)
176 newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
177
178 for (int i = 0, j = 0; i < count; i++) {
179 if (dev[i] == dev_to_remove)
180 continue;
181 VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
182 }
183
184 VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
185 VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
186
187 for (int i = 0; i < count - 1; i++)
188 nvlist_free(newdev[i]);
189
190 if (count > 1)
191 kmem_free(newdev, (count - 1) * sizeof (void *));
192 }
193
194 static spa_vdev_removal_t *
195 spa_vdev_removal_create(vdev_t *vd)
196 {
197 spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
198 mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
199 cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
200 svr->svr_allocd_segs = range_tree_create(NULL, NULL);
201 svr->svr_vdev_id = vd->vdev_id;
202
203 for (int i = 0; i < TXG_SIZE; i++) {
204 svr->svr_frees[i] = range_tree_create(NULL, NULL);
205 list_create(&svr->svr_new_segments[i],
206 sizeof (vdev_indirect_mapping_entry_t),
207 offsetof(vdev_indirect_mapping_entry_t, vime_node));
208 }
209
210 return (svr);
211 }
212
213 void
214 spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
215 {
216 for (int i = 0; i < TXG_SIZE; i++) {
217 ASSERT0(svr->svr_bytes_done[i]);
218 ASSERT0(svr->svr_max_offset_to_sync[i]);
219 range_tree_destroy(svr->svr_frees[i]);
220 list_destroy(&svr->svr_new_segments[i]);
221 }
222
223 range_tree_destroy(svr->svr_allocd_segs);
224 mutex_destroy(&svr->svr_lock);
225 cv_destroy(&svr->svr_cv);
226 kmem_free(svr, sizeof (*svr));
227 }
228
229 /*
230 * This is called as a synctask in the txg in which we will mark this vdev
231 * as removing (in the config stored in the MOS).
232 *
233 * It begins the evacuation of a toplevel vdev by:
234 * - initializing the spa_removing_phys which tracks this removal
235 * - computing the amount of space to remove for accounting purposes
236 * - dirtying all dbufs in the spa_config_object
237 * - creating the spa_vdev_removal
238 * - starting the spa_vdev_remove_thread
239 */
240 static void
241 vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
242 {
243 int vdev_id = (uintptr_t)arg;
244 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
245 vdev_t *vd = vdev_lookup_top(spa, vdev_id);
246 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
247 objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
248 spa_vdev_removal_t *svr = NULL;
249 ASSERTV(uint64_t txg = dmu_tx_get_txg(tx));
250
251 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
252 svr = spa_vdev_removal_create(vd);
253
254 ASSERT(vd->vdev_removing);
255 ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
256
257 spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
258 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
259 /*
260 * By activating the OBSOLETE_COUNTS feature, we prevent
261 * the pool from being downgraded and ensure that the
262 * refcounts are precise.
263 */
264 spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
265 uint64_t one = 1;
266 VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
267 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
268 &one, tx));
269 ASSERTV(boolean_t are_precise);
270 ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
271 ASSERT3B(are_precise, ==, B_TRUE);
272 }
273
274 vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
275 vd->vdev_indirect_mapping =
276 vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
277 vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
278 vd->vdev_indirect_births =
279 vdev_indirect_births_open(mos, vic->vic_births_object);
280 spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
281 spa->spa_removing_phys.sr_start_time = gethrestime_sec();
282 spa->spa_removing_phys.sr_end_time = 0;
283 spa->spa_removing_phys.sr_state = DSS_SCANNING;
284 spa->spa_removing_phys.sr_to_copy = 0;
285 spa->spa_removing_phys.sr_copied = 0;
286
287 /*
288 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
289 * there may be space in the defer tree, which is free, but still
290 * counted in vs_alloc.
291 */
292 for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
293 metaslab_t *ms = vd->vdev_ms[i];
294 if (ms->ms_sm == NULL)
295 continue;
296
297 spa->spa_removing_phys.sr_to_copy +=
298 metaslab_allocated_space(ms);
299
300 /*
301 * Space which we are freeing this txg does not need to
302 * be copied.
303 */
304 spa->spa_removing_phys.sr_to_copy -=
305 range_tree_space(ms->ms_freeing);
306
307 ASSERT0(range_tree_space(ms->ms_freed));
308 for (int t = 0; t < TXG_SIZE; t++)
309 ASSERT0(range_tree_space(ms->ms_allocating[t]));
310 }
311
312 /*
313 * Sync tasks are called before metaslab_sync(), so there should
314 * be no already-synced metaslabs in the TXG_CLEAN list.
315 */
316 ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
317
318 spa_sync_removing_state(spa, tx);
319
320 /*
321  * All blocks that we need in order to read the most recent mapping
322  * must be stored on concrete vdevs. Therefore, we must dirty anything that
323 * is read before spa_remove_init(). Specifically, the
324 * spa_config_object. (Note that although we already modified the
325 * spa_config_object in spa_sync_removing_state, that may not have
326 * modified all blocks of the object.)
327 */
328 dmu_object_info_t doi;
329 VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
330 for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
331 dmu_buf_t *dbuf;
332 VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
333 offset, FTAG, &dbuf, 0));
334 dmu_buf_will_dirty(dbuf, tx);
335 offset += dbuf->db_size;
336 dmu_buf_rele(dbuf, FTAG);
337 }
338
339 /*
340 * Now that we've allocated the im_object, dirty the vdev to ensure
341 * that the object gets written to the config on disk.
342 */
343 vdev_config_dirty(vd);
344
345 zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
346 "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
347 vic->vic_mapping_object);
348
349 spa_history_log_internal(spa, "vdev remove started", tx,
350 "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
351 (vd->vdev_path != NULL) ? vd->vdev_path : "-");
352 /*
353 * Setting spa_vdev_removal causes subsequent frees to call
354 * free_from_removing_vdev(). Note that we don't need any locking
355 * because we are the sync thread, and metaslab_free_impl() is only
356 * called from syncing context (potentially from a zio taskq thread,
357 * but in any case only when there are outstanding free i/os, which
358 * there are not).
359 */
360 ASSERT3P(spa->spa_vdev_removal, ==, NULL);
361 spa->spa_vdev_removal = svr;
362 svr->svr_thread = thread_create(NULL, 0,
363 spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
364 }
365
366 /*
367 * When we are opening a pool, we must read the mapping for each
368 * indirect vdev in order from most recently removed to least
369 * recently removed. We do this because the blocks for the mapping
370 * of older indirect vdevs may be stored on more recently removed vdevs.
371 * In order to read each indirect mapping object, we must have
372 * initialized all more recently removed vdevs.
373 */
374 int
375 spa_remove_init(spa_t *spa)
376 {
377 int error;
378
379 error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
380 DMU_POOL_DIRECTORY_OBJECT,
381 DMU_POOL_REMOVING, sizeof (uint64_t),
382 sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
383 &spa->spa_removing_phys);
384
385 if (error == ENOENT) {
386 spa->spa_removing_phys.sr_state = DSS_NONE;
387 spa->spa_removing_phys.sr_removing_vdev = -1;
388 spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
389 spa->spa_indirect_vdevs_loaded = B_TRUE;
390 return (0);
391 } else if (error != 0) {
392 return (error);
393 }
394
395 if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
396 /*
397 * We are currently removing a vdev. Create and
398 * initialize a spa_vdev_removal_t from the bonus
399  * buffer of the removing vdev's vdev_im_object, and
400 * initialize its partial mapping.
401 */
402 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
403 vdev_t *vd = vdev_lookup_top(spa,
404 spa->spa_removing_phys.sr_removing_vdev);
405
406 if (vd == NULL) {
407 spa_config_exit(spa, SCL_STATE, FTAG);
408 return (EINVAL);
409 }
410
411 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
412
413 ASSERT(vdev_is_concrete(vd));
414 spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
415 ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
416 ASSERT(vd->vdev_removing);
417
418 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
419 spa->spa_meta_objset, vic->vic_mapping_object);
420 vd->vdev_indirect_births = vdev_indirect_births_open(
421 spa->spa_meta_objset, vic->vic_births_object);
422 spa_config_exit(spa, SCL_STATE, FTAG);
423
424 spa->spa_vdev_removal = svr;
425 }
426
427 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
428 uint64_t indirect_vdev_id =
429 spa->spa_removing_phys.sr_prev_indirect_vdev;
430 while (indirect_vdev_id != UINT64_MAX) {
431 vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
432 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
433
434 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
435 vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
436 spa->spa_meta_objset, vic->vic_mapping_object);
437 vd->vdev_indirect_births = vdev_indirect_births_open(
438 spa->spa_meta_objset, vic->vic_births_object);
439
440 indirect_vdev_id = vic->vic_prev_indirect_vdev;
441 }
442 spa_config_exit(spa, SCL_STATE, FTAG);
443
444 /*
445 * Now that we've loaded all the indirect mappings, we can allow
446 * reads from other blocks (e.g. via predictive prefetch).
447 */
448 spa->spa_indirect_vdevs_loaded = B_TRUE;
449 return (0);
450 }
451
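/*
 * A hypothetical example of the ordering described above: if vdev 2 was
 * removed first, then vdev 4, then vdev 7, the on-disk state is
 *
 *	sr_prev_indirect_vdev = 7
 *	vdev 7: vic_prev_indirect_vdev = 4
 *	vdev 4: vic_prev_indirect_vdev = 2
 *	vdev 2: vic_prev_indirect_vdev = UINT64_MAX
 *
 * so the loop above opens the mapping for vdev 7 first, then 4, then 2;
 * vdev 4's mapping blocks may live on vdev 7, which by then can already
 * be remapped.
 */
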
452 void
453 spa_restart_removal(spa_t *spa)
454 {
455 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
456
457 if (svr == NULL)
458 return;
459
460 /*
461 * In general when this function is called there is no
462 * removal thread running. The only scenario where this
463 * is not true is during spa_import() where this function
464  * is called twice [once from spa_import_impl() and once
465  * from spa_async_resume()]. Thus, in the scenario where we
466 * import a pool that has an ongoing removal we don't
467 * want to spawn a second thread.
468 */
469 if (svr->svr_thread != NULL)
470 return;
471
472 if (!spa_writeable(spa))
473 return;
474
475 zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
476 svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
477 0, &p0, TS_RUN, minclsyspri);
478 }
479
480 /*
481 * Process freeing from a device which is in the middle of being removed.
482 * We must handle this carefully so that we attempt to copy freed data,
483 * and we correctly free already-copied data.
484 */
485 void
486 free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
487 {
488 spa_t *spa = vd->vdev_spa;
489 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
490 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
491 uint64_t txg = spa_syncing_txg(spa);
492 uint64_t max_offset_yet = 0;
493
494 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
495 ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
496 vdev_indirect_mapping_object(vim));
497 ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);
498
499 mutex_enter(&svr->svr_lock);
500
501 /*
502 * Remove the segment from the removing vdev's spacemap. This
503 * ensures that we will not attempt to copy this space (if the
504 * removal thread has not yet visited it), and also ensures
505 * that we know what is actually allocated on the new vdevs
506 * (needed if we cancel the removal).
507 *
508 * Note: we must do the metaslab_free_concrete() with the svr_lock
509 * held, so that the remove_thread can not load this metaslab and then
510 * visit this offset between the time that we metaslab_free_concrete()
511 * and when we check to see if it has been visited.
512 *
513 * Note: The checkpoint flag is set to false as having/taking
514 * a checkpoint and removing a device can't happen at the same
515 * time.
516 */
517 ASSERT(!spa_has_checkpoint(spa));
518 metaslab_free_concrete(vd, offset, size, B_FALSE);
519
520 uint64_t synced_size = 0;
521 uint64_t synced_offset = 0;
522 uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
523 if (offset < max_offset_synced) {
524 /*
525 * The mapping for this offset is already on disk.
526 * Free from the new location.
527 *
528 * Note that we use svr_max_synced_offset because it is
529 * updated atomically with respect to the in-core mapping.
530 * By contrast, vim_max_offset is not.
531 *
532 * This block may be split between a synced entry and an
533 * in-flight or unvisited entry. Only process the synced
534 * portion of it here.
535 */
536 synced_size = MIN(size, max_offset_synced - offset);
537 synced_offset = offset;
538
539 ASSERT3U(max_offset_yet, <=, max_offset_synced);
540 max_offset_yet = max_offset_synced;
541
542 DTRACE_PROBE3(remove__free__synced,
543 spa_t *, spa,
544 uint64_t, offset,
545 uint64_t, synced_size);
546
547 size -= synced_size;
548 offset += synced_size;
549 }
550
551 /*
552 * Look at all in-flight txgs starting from the currently syncing one
553 * and see if a section of this free is being copied. By starting from
554 * this txg and iterating forward, we might find that this region
555 * was copied in two different txgs and handle it appropriately.
556 */
557 for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
558 int txgoff = (txg + i) & TXG_MASK;
559 if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
560 /*
561 * The mapping for this offset is in flight, and
562 * will be synced in txg+i.
563 */
564 uint64_t inflight_size = MIN(size,
565 svr->svr_max_offset_to_sync[txgoff] - offset);
566
567 DTRACE_PROBE4(remove__free__inflight,
568 spa_t *, spa,
569 uint64_t, offset,
570 uint64_t, inflight_size,
571 uint64_t, txg + i);
572
573 /*
574 * We copy data in order of increasing offset.
575 * Therefore the max_offset_to_sync[] must increase
576 * (or be zero, indicating that nothing is being
577 * copied in that txg).
578 */
579 if (svr->svr_max_offset_to_sync[txgoff] != 0) {
580 ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
581 >=, max_offset_yet);
582 max_offset_yet =
583 svr->svr_max_offset_to_sync[txgoff];
584 }
585
586 /*
587 * We've already committed to copying this segment:
588 * we have allocated space elsewhere in the pool for
589 * it and have an IO outstanding to copy the data. We
590 * cannot free the space before the copy has
591 * completed, or else the copy IO might overwrite any
592 * new data. To free that space, we record the
593 * segment in the appropriate svr_frees tree and free
594 * the mapped space later, in the txg where we have
595 * completed the copy and synced the mapping (see
596 * vdev_mapping_sync).
597 */
598 range_tree_add(svr->svr_frees[txgoff],
599 offset, inflight_size);
600 size -= inflight_size;
601 offset += inflight_size;
602
603 /*
604 * This space is already accounted for as being
605 * done, because it is being copied in txg+i.
606 * However, if i!=0, then it is being copied in
607 * a future txg. If we crash after this txg
608 * syncs but before txg+i syncs, then the space
609 * will be free. Therefore we must account
610 * for the space being done in *this* txg
611 * (when it is freed) rather than the future txg
612 * (when it will be copied).
613 */
614 ASSERT3U(svr->svr_bytes_done[txgoff], >=,
615 inflight_size);
616 svr->svr_bytes_done[txgoff] -= inflight_size;
617 svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
618 }
619 }
620 ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
621
622 if (size > 0) {
623 /*
624 * The copy thread has not yet visited this offset. Ensure
625 * that it doesn't.
626 */
627
628 DTRACE_PROBE3(remove__free__unvisited,
629 spa_t *, spa,
630 uint64_t, offset,
631 uint64_t, size);
632
633 if (svr->svr_allocd_segs != NULL)
634 range_tree_clear(svr->svr_allocd_segs, offset, size);
635
636 /*
637 * Since we now do not need to copy this data, for
638 * accounting purposes we have done our job and can count
639 * it as completed.
640 */
641 svr->svr_bytes_done[txg & TXG_MASK] += size;
642 }
643 mutex_exit(&svr->svr_lock);
644
645 /*
646 * Now that we have dropped svr_lock, process the synced portion
647 * of this free.
648 */
649 if (synced_size > 0) {
650 vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);
651
652 /*
653 * Note: this can only be called from syncing context,
654 * and the vdev_indirect_mapping is only changed from the
655 * sync thread, so we don't need svr_lock while doing
656 * metaslab_free_impl_cb.
657 */
658 boolean_t checkpoint = B_FALSE;
659 vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
660 metaslab_free_impl_cb, &checkpoint);
661 }
662 }
663
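/*
 * A hypothetical walk through the three cases above: suppose a 192K free
 * arrives at an offset where the first 64K of the mapping has already
 * synced, the next 64K is in flight in txg+1, and the last 64K has not
 * yet been visited by the copy thread.  The first 64K is remapped and
 * freed from its new location (the synced_size path), the middle 64K is
 * recorded in svr_frees[(txg+1) & TXG_MASK] and freed when that mapping
 * syncs (see vdev_mapping_sync()), and the last 64K is simply cleared
 * from svr_allocd_segs so it is never copied at all.
 */
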
664 /*
665 * Stop an active removal and update the spa_removing phys.
666 */
667 static void
668 spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
669 {
670 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
671 ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
672
673 /* Ensure the removal thread has completed before we free the svr. */
674 spa_vdev_remove_suspend(spa);
675
676 ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
677
678 if (state == DSS_FINISHED) {
679 spa_removing_phys_t *srp = &spa->spa_removing_phys;
680 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
681 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
682
683 if (srp->sr_prev_indirect_vdev != -1) {
684 vdev_t *pvd;
685 pvd = vdev_lookup_top(spa,
686 srp->sr_prev_indirect_vdev);
687 ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
688 }
689
690 vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
691 srp->sr_prev_indirect_vdev = vd->vdev_id;
692 }
693 spa->spa_removing_phys.sr_state = state;
694 spa->spa_removing_phys.sr_end_time = gethrestime_sec();
695
696 spa->spa_vdev_removal = NULL;
697 spa_vdev_removal_destroy(svr);
698
699 spa_sync_removing_state(spa, tx);
700
701 vdev_config_dirty(spa->spa_root_vdev);
702 }
703
704 static void
705 free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
706 {
707 vdev_t *vd = arg;
708 vdev_indirect_mark_obsolete(vd, offset, size);
709 boolean_t checkpoint = B_FALSE;
710 vdev_indirect_ops.vdev_op_remap(vd, offset, size,
711 metaslab_free_impl_cb, &checkpoint);
712 }
713
714 /*
715 * On behalf of the removal thread, syncs an incremental bit more of
716 * the indirect mapping to disk and updates the in-memory mapping.
717 * Called as a sync task in every txg that the removal thread makes progress.
718 */
719 static void
720 vdev_mapping_sync(void *arg, dmu_tx_t *tx)
721 {
722 spa_vdev_removal_t *svr = arg;
723 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
724 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
725 ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
726 uint64_t txg = dmu_tx_get_txg(tx);
727 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
728
729 ASSERT(vic->vic_mapping_object != 0);
730 ASSERT3U(txg, ==, spa_syncing_txg(spa));
731
732 vdev_indirect_mapping_add_entries(vim,
733 &svr->svr_new_segments[txg & TXG_MASK], tx);
734 vdev_indirect_births_add_entry(vd->vdev_indirect_births,
735 vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
736
737 /*
738 * Free the copied data for anything that was freed while the
739 * mapping entries were in flight.
740 */
741 mutex_enter(&svr->svr_lock);
742 range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
743 free_mapped_segment_cb, vd);
744 ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
745 vdev_indirect_mapping_max_offset(vim));
746 svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
747 mutex_exit(&svr->svr_lock);
748
749 spa_sync_removing_state(spa, tx);
750 }
751
752 typedef struct vdev_copy_segment_arg {
753 spa_t *vcsa_spa;
754 dva_t *vcsa_dest_dva;
755 uint64_t vcsa_txg;
756 range_tree_t *vcsa_obsolete_segs;
757 } vdev_copy_segment_arg_t;
758
759 static void
760 unalloc_seg(void *arg, uint64_t start, uint64_t size)
761 {
762 vdev_copy_segment_arg_t *vcsa = arg;
763 spa_t *spa = vcsa->vcsa_spa;
764 blkptr_t bp = { { { {0} } } };
765
766 BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
767 BP_SET_LSIZE(&bp, size);
768 BP_SET_PSIZE(&bp, size);
769 BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
770 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
771 BP_SET_TYPE(&bp, DMU_OT_NONE);
772 BP_SET_LEVEL(&bp, 0);
773 BP_SET_DEDUP(&bp, 0);
774 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
775
776 DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
777 DVA_SET_OFFSET(&bp.blk_dva[0],
778 DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
779 DVA_SET_ASIZE(&bp.blk_dva[0], size);
780
781 zio_free(spa, vcsa->vcsa_txg, &bp);
782 }
783
784 /*
785 * All reads and writes associated with a call to spa_vdev_copy_segment()
786 * are done.
787 */
788 static void
789 spa_vdev_copy_segment_done(zio_t *zio)
790 {
791 vdev_copy_segment_arg_t *vcsa = zio->io_private;
792
793 range_tree_vacate(vcsa->vcsa_obsolete_segs,
794 unalloc_seg, vcsa);
795 range_tree_destroy(vcsa->vcsa_obsolete_segs);
796 kmem_free(vcsa, sizeof (*vcsa));
797
798 spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
799 }
800
801 /*
802 * The write of the new location is done.
803 */
804 static void
805 spa_vdev_copy_segment_write_done(zio_t *zio)
806 {
807 vdev_copy_arg_t *vca = zio->io_private;
808
809 abd_free(zio->io_abd);
810
811 mutex_enter(&vca->vca_lock);
812 vca->vca_outstanding_bytes -= zio->io_size;
813
814 if (zio->io_error != 0)
815 vca->vca_write_error_bytes += zio->io_size;
816
817 cv_signal(&vca->vca_cv);
818 mutex_exit(&vca->vca_lock);
819 }
820
821 /*
822 * The read of the old location is done. The parent zio is the write to
823 * the new location. Allow it to start.
824 */
825 static void
826 spa_vdev_copy_segment_read_done(zio_t *zio)
827 {
828 vdev_copy_arg_t *vca = zio->io_private;
829
830 if (zio->io_error != 0) {
831 mutex_enter(&vca->vca_lock);
832 vca->vca_read_error_bytes += zio->io_size;
833 mutex_exit(&vca->vca_lock);
834 }
835
836 zio_nowait(zio_unique_parent(zio));
837 }
838
839 /*
840 * If the old and new vdevs are mirrors, we will read both sides of the old
841 * mirror, and write each copy to the corresponding side of the new mirror.
842 * If the old and new vdevs have a different number of children, we will do
843 * this as best as possible. Since we aren't verifying checksums, this
844 * ensures that as long as there's a good copy of the data, we'll have a
845 * good copy after the removal, even if there's silent damage to one side
846 * of the mirror. If we're removing a mirror that has some silent damage,
847 * we'll have exactly the same damage in the new location (assuming that
848 * the new location is also a mirror).
849 *
850 * We accomplish this by creating a tree of zio_t's, with as many writes as
851 * there are "children" of the new vdev (a non-redundant vdev counts as one
852 * child, a 2-way mirror has 2 children, etc). Each write has an associated
853 * read from a child of the old vdev. Typically there will be the same
854 * number of children of the old and new vdevs. However, if there are more
855 * children of the new vdev, some child(ren) of the old vdev will be issued
856 * multiple reads. If there are more children of the old vdev, some copies
857 * will be dropped.
858 *
859 * For example, the tree of zio_t's for a 2-way mirror is:
860 *
861 * null
862 * / \
863 * write(new vdev, child 0) write(new vdev, child 1)
864 * | |
865 * read(old vdev, child 0) read(old vdev, child 1)
866 *
867 * Child zio's complete before their parents complete. However, zio's
868 * created with zio_vdev_child_io() may be issued before their children
869 * complete. In this case we need to make sure that the children (reads)
870 * complete before the parents (writes) are *issued*. We do this by not
871 * calling zio_nowait() on each write until its corresponding read has
872 * completed.
873 *
874 * The spa_config_lock must be held while zio's created by
875 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
876 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
877 * zio is needed to release the spa_config_lock after all the reads and
878 * writes complete. (Note that we can't grab the config lock for each read,
879 * because it is not reentrant - we could deadlock with a thread waiting
880 * for a write lock.)
881 */
882 static void
883 spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
884 vdev_t *source_vd, uint64_t source_offset,
885 vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
886 {
887 ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);
888
889 /*
890  * If the destination child is unwritable then there is no point
891 * in issuing the source reads which cannot be written.
892 */
893 if (!vdev_writeable(dest_child_vd))
894 return;
895
896 mutex_enter(&vca->vca_lock);
897 vca->vca_outstanding_bytes += size;
898 mutex_exit(&vca->vca_lock);
899
900 abd_t *abd = abd_alloc_for_io(size, B_FALSE);
901
902 vdev_t *source_child_vd = NULL;
903 if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
904 /*
905 * Source and dest are both mirrors. Copy from the same
906 * child id as we are copying to (wrapping around if there
907 * are more dest children than source children). If the
908 * preferred source child is unreadable select another.
909 */
910 for (int i = 0; i < source_vd->vdev_children; i++) {
911 source_child_vd = source_vd->vdev_child[
912 (dest_id + i) % source_vd->vdev_children];
913 if (vdev_readable(source_child_vd))
914 break;
915 }
916 } else {
917 source_child_vd = source_vd;
918 }
919
920 /*
921 * There should always be at least one readable source child or
922  * the pool would be in a suspended state. If an unreadable child were
923  * somehow selected it would result in IO errors, the removal process
924  * being cancelled, and the pool reverting to its pre-removal state.
925 */
926 ASSERT3P(source_child_vd, !=, NULL);
927
928 zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
929 dest_child_vd, dest_offset, abd, size,
930 ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
931 ZIO_FLAG_CANFAIL,
932 spa_vdev_copy_segment_write_done, vca);
933
934 zio_nowait(zio_vdev_child_io(write_zio, NULL,
935 source_child_vd, source_offset, abd, size,
936 ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
937 ZIO_FLAG_CANFAIL,
938 spa_vdev_copy_segment_read_done, vca));
939 }
940
941 /*
942 * Allocate a new location for this segment, and create the zio_t's to
943 * read from the old location and write to the new location.
944 */
945 static int
946 spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
947 uint64_t maxalloc, uint64_t txg,
948 vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
949 {
950 metaslab_group_t *mg = vd->vdev_mg;
951 spa_t *spa = vd->vdev_spa;
952 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
953 vdev_indirect_mapping_entry_t *entry;
954 dva_t dst = {{ 0 }};
955 uint64_t start = range_tree_min(segs);
956 ASSERT0(P2PHASE(start, 1 << spa->spa_min_ashift));
957
958 ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);
959 ASSERT0(P2PHASE(maxalloc, 1 << spa->spa_min_ashift));
960
961 uint64_t size = range_tree_span(segs);
962 if (range_tree_span(segs) > maxalloc) {
963 /*
964 * We can't allocate all the segments. Prefer to end
965 * the allocation at the end of a segment, thus avoiding
966 * additional split blocks.
967 */
968 range_seg_t search;
969 avl_index_t where;
970 search.rs_start = start + maxalloc;
971 search.rs_end = search.rs_start;
972 range_seg_t *rs = avl_find(&segs->rt_root, &search, &where);
973 if (rs == NULL) {
974 rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE);
975 } else {
976 rs = AVL_PREV(&segs->rt_root, rs);
977 }
978 if (rs != NULL) {
979 size = rs->rs_end - start;
980 } else {
981 /*
982 * There are no segments that end before maxalloc.
983 * I.e. the first segment is larger than maxalloc,
984 * so we must split it.
985 */
986 size = maxalloc;
987 }
988 }
989 ASSERT3U(size, <=, maxalloc);
990 ASSERT0(P2PHASE(size, 1 << spa->spa_min_ashift));
991
992 /*
993 * An allocation class might not have any remaining vdevs or space
994 */
995 metaslab_class_t *mc = mg->mg_class;
996 if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
997 mc = spa_normal_class(spa);
998 int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
999 zal, 0);
1000 if (error == ENOSPC && mc != spa_normal_class(spa)) {
1001 error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
1002 &dst, 0, NULL, txg, 0, zal, 0);
1003 }
1004 if (error != 0)
1005 return (error);
1006
1007 /*
1008 * Determine the ranges that are not actually needed. Offsets are
1009 * relative to the start of the range to be copied (i.e. relative to the
1010 * local variable "start").
1011 */
1012 range_tree_t *obsolete_segs = range_tree_create(NULL, NULL);
1013
1014 range_seg_t *rs = avl_first(&segs->rt_root);
1015 ASSERT3U(rs->rs_start, ==, start);
1016 uint64_t prev_seg_end = rs->rs_end;
1017 while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) {
1018 if (rs->rs_start >= start + size) {
1019 break;
1020 } else {
1021 range_tree_add(obsolete_segs,
1022 prev_seg_end - start,
1023 rs->rs_start - prev_seg_end);
1024 }
1025 prev_seg_end = rs->rs_end;
1026 }
1027 /* We don't end in the middle of an obsolete range */
1028 ASSERT3U(start + size, <=, prev_seg_end);
1029
1030 range_tree_clear(segs, start, size);
1031
1032 /*
1033 * We can't have any padding of the allocated size, otherwise we will
1034 * misunderstand what's allocated, and the size of the mapping. We
1035 * prevent padding by ensuring that all devices in the pool have the
1036 * same ashift, and the allocation size is a multiple of the ashift.
1037 */
1038 VERIFY3U(DVA_GET_ASIZE(&dst), ==, size);
1039
1040 entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
1041 DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
1042 entry->vime_mapping.vimep_dst = dst;
1043 if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
1044 entry->vime_obsolete_count = range_tree_space(obsolete_segs);
1045 }
1046
1047 vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
1048 vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
1049 vcsa->vcsa_obsolete_segs = obsolete_segs;
1050 vcsa->vcsa_spa = spa;
1051 vcsa->vcsa_txg = txg;
1052
1053 /*
1054 * See comment before spa_vdev_copy_one_child().
1055 */
1056 spa_config_enter(spa, SCL_STATE, spa, RW_READER);
1057 zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
1058 spa_vdev_copy_segment_done, vcsa, 0);
1059 vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
1060 if (dest_vd->vdev_ops == &vdev_mirror_ops) {
1061 for (int i = 0; i < dest_vd->vdev_children; i++) {
1062 vdev_t *child = dest_vd->vdev_child[i];
1063 spa_vdev_copy_one_child(vca, nzio, vd, start,
1064 child, DVA_GET_OFFSET(&dst), i, size);
1065 }
1066 } else {
1067 spa_vdev_copy_one_child(vca, nzio, vd, start,
1068 dest_vd, DVA_GET_OFFSET(&dst), -1, size);
1069 }
1070 zio_nowait(nzio);
1071
1072 list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
1073 ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
1074 vdev_dirty(vd, 0, NULL, txg);
1075
1076 return (0);
1077 }
1078
1079 /*
1080 * Complete the removal of a toplevel vdev. This is called as a
1081 * synctask in the same txg that we will sync out the new config (to the
1082 * MOS object) which indicates that this vdev is indirect.
1083 */
1084 static void
1085 vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
1086 {
1087 spa_vdev_removal_t *svr = arg;
1088 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1089 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1090
1091 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
1092
1093 for (int i = 0; i < TXG_SIZE; i++) {
1094 ASSERT0(svr->svr_bytes_done[i]);
1095 }
1096
1097 ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
1098 spa->spa_removing_phys.sr_to_copy);
1099
1100 vdev_destroy_spacemaps(vd, tx);
1101
1102 /* destroy leaf zaps, if any */
1103 ASSERT3P(svr->svr_zaplist, !=, NULL);
1104 for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
1105 pair != NULL;
1106 pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
1107 vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
1108 }
1109 fnvlist_free(svr->svr_zaplist);
1110
1111 spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
1112 /* vd->vdev_path is not available here */
1113 spa_history_log_internal(spa, "vdev remove completed", tx,
1114 "%s vdev %llu", spa_name(spa), vd->vdev_id);
1115 }
1116
1117 static void
1118 vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
1119 {
1120 ASSERT3P(zlist, !=, NULL);
1121 ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
1122
1123 if (vd->vdev_leaf_zap != 0) {
1124 char zkey[32];
1125 (void) snprintf(zkey, sizeof (zkey), "%s-%llu",
1126 VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
1127 fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
1128 }
1129
1130 for (uint64_t id = 0; id < vd->vdev_children; id++) {
1131 vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
1132 }
1133 }
1134
1135 static void
1136 vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
1137 {
1138 vdev_t *ivd;
1139 dmu_tx_t *tx;
1140 spa_t *spa = vd->vdev_spa;
1141 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1142
1143 /*
1144 * First, build a list of leaf zaps to be destroyed.
1145 * This is passed to the sync context thread,
1146 * which does the actual unlinking.
1147 */
1148 svr->svr_zaplist = fnvlist_alloc();
1149 vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
1150
1151 ivd = vdev_add_parent(vd, &vdev_indirect_ops);
1152 ivd->vdev_removing = 0;
1153
1154 vd->vdev_leaf_zap = 0;
1155
1156 vdev_remove_child(ivd, vd);
1157 vdev_compact_children(ivd);
1158
1159 ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
1160
1161 mutex_enter(&svr->svr_lock);
1162 svr->svr_thread = NULL;
1163 cv_broadcast(&svr->svr_cv);
1164 mutex_exit(&svr->svr_lock);
1165
1166 /* After this, we can not use svr. */
1167 tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
1168 dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
1169 0, ZFS_SPACE_CHECK_NONE, tx);
1170 dmu_tx_commit(tx);
1171 }
1172
1173 /*
1174 * Complete the removal of a toplevel vdev. This is called in open
1175 * context by the removal thread after we have copied all vdev's data.
1176 */
1177 static void
1178 vdev_remove_complete(spa_t *spa)
1179 {
1180 uint64_t txg;
1181
1182 /*
1183 * Wait for any deferred frees to be synced before we call
1184 * vdev_metaslab_fini()
1185 */
1186 txg_wait_synced(spa->spa_dsl_pool, 0);
1187 txg = spa_vdev_enter(spa);
1188 vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);
1189 ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
1190 ASSERT3P(vd->vdev_trim_thread, ==, NULL);
1191 ASSERT3P(vd->vdev_autotrim_thread, ==, NULL);
1192
1193 sysevent_t *ev = spa_event_create(spa, vd, NULL,
1194 ESC_ZFS_VDEV_REMOVE_DEV);
1195
1196 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
1197 vd->vdev_id, txg);
1198
1199 /*
1200 * Discard allocation state.
1201 */
1202 if (vd->vdev_mg != NULL) {
1203 vdev_metaslab_fini(vd);
1204 metaslab_group_destroy(vd->vdev_mg);
1205 vd->vdev_mg = NULL;
1206 }
1207 ASSERT0(vd->vdev_stat.vs_space);
1208 ASSERT0(vd->vdev_stat.vs_dspace);
1209
1210 vdev_remove_replace_with_indirect(vd, txg);
1211
1212 /*
1213 * We now release the locks, allowing spa_sync to run and finish the
1214 * removal via vdev_remove_complete_sync in syncing context.
1215 *
1216 * Note that we hold on to the vdev_t that has been replaced. Since
1217 * it isn't part of the vdev tree any longer, it can't be concurrently
1218 * manipulated, even while we don't have the config lock.
1219 */
1220 (void) spa_vdev_exit(spa, NULL, txg, 0);
1221
1222 /*
1223 * Top ZAP should have been transferred to the indirect vdev in
1224 * vdev_remove_replace_with_indirect.
1225 */
1226 ASSERT0(vd->vdev_top_zap);
1227
1228 /*
1229 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
1230 */
1231 ASSERT0(vd->vdev_leaf_zap);
1232
1233 txg = spa_vdev_enter(spa);
1234 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1235 /*
1236 * Request to update the config and the config cachefile.
1237 */
1238 vdev_config_dirty(spa->spa_root_vdev);
1239 (void) spa_vdev_exit(spa, vd, txg, 0);
1240
1241 if (ev != NULL)
1242 spa_event_post(ev);
1243 }
1244
1245 /*
1246 * Evacuates a segment of size at most max_alloc from the vdev
1247 * via repeated calls to spa_vdev_copy_segment. If an allocation
1248 * fails, the pool is probably too fragmented to handle such a
1249 * large size, so decrease max_alloc so that the caller will not try
1250 * this size again this txg.
1251 */
1252 static void
1253 spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
1254 uint64_t *max_alloc, dmu_tx_t *tx)
1255 {
1256 uint64_t txg = dmu_tx_get_txg(tx);
1257 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1258
1259 mutex_enter(&svr->svr_lock);
1260
1261 /*
1262 * Determine how big of a chunk to copy. We can allocate up
1263 * to max_alloc bytes, and we can span up to vdev_removal_max_span
1264 * bytes of unallocated space at a time. "segs" will track the
1265 * allocated segments that we are copying. We may also be copying
1266 * free segments (of up to vdev_removal_max_span bytes).
1267 */
1268 range_tree_t *segs = range_tree_create(NULL, NULL);
1269 for (;;) {
1270 range_seg_t *rs = range_tree_first(svr->svr_allocd_segs);
1271
1272 if (rs == NULL)
1273 break;
1274
1275 uint64_t seg_length;
1276
1277 if (range_tree_is_empty(segs)) {
1278 /* need to truncate the first seg based on max_alloc */
1279 seg_length =
1280 MIN(rs->rs_end - rs->rs_start, *max_alloc);
1281 } else {
1282 if (rs->rs_start - range_tree_max(segs) >
1283 vdev_removal_max_span) {
1284 /*
1285 * Including this segment would cause us to
1286 * copy a larger unneeded chunk than is allowed.
1287 */
1288 break;
1289 } else if (rs->rs_end - range_tree_min(segs) >
1290 *max_alloc) {
1291 /*
1292 * This additional segment would extend past
1293 * max_alloc. Rather than splitting this
1294 * segment, leave it for the next mapping.
1295 */
1296 break;
1297 } else {
1298 seg_length = rs->rs_end - rs->rs_start;
1299 }
1300 }
1301
1302 range_tree_add(segs, rs->rs_start, seg_length);
1303 range_tree_remove(svr->svr_allocd_segs,
1304 rs->rs_start, seg_length);
1305 }
1306
1307 if (range_tree_is_empty(segs)) {
1308 mutex_exit(&svr->svr_lock);
1309 range_tree_destroy(segs);
1310 return;
1311 }
1312
1313 if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
1314 dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
1315 svr, 0, ZFS_SPACE_CHECK_NONE, tx);
1316 }
1317
1318 svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);
1319
1320 /*
1321 * Note: this is the amount of *allocated* space
1322 * that we are taking care of each txg.
1323 */
1324 svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);
1325
1326 mutex_exit(&svr->svr_lock);
1327
1328 zio_alloc_list_t zal;
1329 metaslab_trace_init(&zal);
1330 uint64_t thismax = SPA_MAXBLOCKSIZE;
1331 while (!range_tree_is_empty(segs)) {
1332 int error = spa_vdev_copy_segment(vd,
1333 segs, thismax, txg, vca, &zal);
1334
1335 if (error == ENOSPC) {
1336 /*
1337 * Cut our segment in half, and don't try this
1338 * segment size again this txg. Note that the
1339 * allocation size must be aligned to the highest
1340 * ashift in the pool, so that the allocation will
1341 * not be padded out to a multiple of the ashift,
1342 * which could cause us to think that this mapping
1343 * is larger than we intended.
1344 */
1345 ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
1346 ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
1347 uint64_t attempted =
1348 MIN(range_tree_span(segs), thismax);
1349 thismax = P2ROUNDUP(attempted / 2,
1350 1 << spa->spa_max_ashift);
1351 /*
1352 * The minimum-size allocation can not fail.
1353 */
1354 ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
1355 *max_alloc = attempted - (1 << spa->spa_max_ashift);
1356 } else {
1357 ASSERT0(error);
1358
1359 /*
1360 * We've performed an allocation, so reset the
1361 * alloc trace list.
1362 */
1363 metaslab_trace_fini(&zal);
1364 metaslab_trace_init(&zal);
1365 }
1366 }
1367 metaslab_trace_fini(&zal);
1368 range_tree_destroy(segs);
1369 }
1370
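/*
 * A hypothetical ENOSPC sequence through the loop above, with ashift=12:
 * a 16M allocation fails, so thismax becomes P2ROUNDUP(8M, 4K) = 8M and
 * max_alloc drops to 16M - 4K; if 8M also fails, thismax becomes 4M, and
 * so on, until an allocation succeeds (the minimum-size, single-sector
 * allocation is asserted never to fail).
 */
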
1371 /*
1372 * The size of each removal mapping is limited by the tunable
1373 * zfs_remove_max_segment, but we must adjust this to be a multiple of the
1374 * pool's ashift, so that we don't try to split individual sectors regardless
1375 * of the tunable value. (Note that device removal requires that all devices
1376 * have the same ashift, so there's no difference between spa_min_ashift and
1377 * spa_max_ashift.) The raw tunable should not be used elsewhere.
1378 */
1379 uint64_t
1380 spa_remove_max_segment(spa_t *spa)
1381 {
1382 return (P2ROUNDUP(zfs_remove_max_segment, 1 << spa->spa_max_ashift));
1383 }
1384
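/*
 * For example (hypothetical tunable values): with ashift=12, the default
 * of SPA_MAXBLOCKSIZE (16M) is already a multiple of 4K and is returned
 * unchanged, whereas a hand-set zfs_remove_max_segment of 1000000 bytes
 * would be rounded up to P2ROUNDUP(1000000, 4096) = 1003520.
 */
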
1385 /*
1386 * The removal thread operates in open context. It iterates over all
1387 * allocated space in the vdev, by loading each metaslab's spacemap.
1388 * For each contiguous segment of allocated space (capping the segment
1389 * size at SPA_MAXBLOCKSIZE), we:
1390 * - Allocate space for it on another vdev.
1391 * - Create a new mapping from the old location to the new location
1392 * (as a record in svr_new_segments).
1393 * - Initiate a physical read zio to get the data off the removing disk.
1394 * - In the read zio's done callback, initiate a physical write zio to
1395 * write it to the new vdev.
1396 * Note that all of this will take effect when a particular TXG syncs.
1397 * The sync thread ensures that all the phys reads and writes for the syncing
1398 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
1399 * (see vdev_mapping_sync()).
1400 */
1401 static void
1402 spa_vdev_remove_thread(void *arg)
1403 {
1404 spa_t *spa = arg;
1405 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1406 vdev_copy_arg_t vca;
1407 uint64_t max_alloc = spa_remove_max_segment(spa);
1408 uint64_t last_txg = 0;
1409
1410 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1411 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1412 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1413 uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
1414
1415 ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
1416 ASSERT(vdev_is_concrete(vd));
1417 ASSERT(vd->vdev_removing);
1418 ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
1419 ASSERT(vim != NULL);
1420
1421 mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
1422 cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
1423 vca.vca_outstanding_bytes = 0;
1424 vca.vca_read_error_bytes = 0;
1425 vca.vca_write_error_bytes = 0;
1426
1427 mutex_enter(&svr->svr_lock);
1428
1429 /*
1430 * Start from vim_max_offset so we pick up where we left off
1431 * if we are restarting the removal after opening the pool.
1432 */
1433 uint64_t msi;
1434 for (msi = start_offset >> vd->vdev_ms_shift;
1435 msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
1436 metaslab_t *msp = vd->vdev_ms[msi];
1437 ASSERT3U(msi, <=, vd->vdev_ms_count);
1438
1439 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1440
1441 mutex_enter(&msp->ms_sync_lock);
1442 mutex_enter(&msp->ms_lock);
1443
1444 /*
1445 * Assert nothing in flight -- ms_*tree is empty.
1446 */
1447 for (int i = 0; i < TXG_SIZE; i++) {
1448 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1449 }
1450
1451 /*
1452 * If the metaslab has ever been allocated from (ms_sm!=NULL),
1453 * read the allocated segments from the space map object
1454 * into svr_allocd_segs. Since we do this while holding
1455 * svr_lock and ms_sync_lock, concurrent frees (which
1456 * would have modified the space map) will wait for us
1457 * to finish loading the spacemap, and then take the
1458 * appropriate action (see free_from_removing_vdev()).
1459 */
1460 if (msp->ms_sm != NULL) {
1461 VERIFY0(space_map_load(msp->ms_sm,
1462 svr->svr_allocd_segs, SM_ALLOC));
1463
1464 range_tree_walk(msp->ms_freeing,
1465 range_tree_remove, svr->svr_allocd_segs);
1466
1467 /*
1468 * When we are resuming from a paused removal (i.e.
1469 * when importing a pool with a removal in progress),
1470 * discard any state that we have already processed.
1471 */
1472 range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
1473 }
1474 mutex_exit(&msp->ms_lock);
1475 mutex_exit(&msp->ms_sync_lock);
1476
1477 vca.vca_msp = msp;
1478 zfs_dbgmsg("copying %llu segments for metaslab %llu",
1479 avl_numnodes(&svr->svr_allocd_segs->rt_root),
1480 msp->ms_id);
1481
1482 while (!svr->svr_thread_exit &&
1483 !range_tree_is_empty(svr->svr_allocd_segs)) {
1484
1485 mutex_exit(&svr->svr_lock);
1486
1487 /*
1488 * We need to periodically drop the config lock so that
1489 * writers can get in. Additionally, we can't wait
1490 * for a txg to sync while holding a config lock
1491 * (since a waiting writer could cause a 3-way deadlock
1492 * with the sync thread, which also gets a config
1493 * lock for reader). So we can't hold the config lock
1494 * while calling dmu_tx_assign().
1495 */
1496 spa_config_exit(spa, SCL_CONFIG, FTAG);
1497
1498 /*
1499 * This delay will pause the removal around the point
1500 * specified by zfs_removal_suspend_progress. We do this
1501 * solely from the test suite or during debugging.
1502 */
1503 uint64_t bytes_copied =
1504 spa->spa_removing_phys.sr_copied;
1505 for (int i = 0; i < TXG_SIZE; i++)
1506 bytes_copied += svr->svr_bytes_done[i];
1507 while (zfs_removal_suspend_progress &&
1508 !svr->svr_thread_exit)
1509 delay(hz);
1510
1511 mutex_enter(&vca.vca_lock);
1512 while (vca.vca_outstanding_bytes >
1513 zfs_remove_max_copy_bytes) {
1514 cv_wait(&vca.vca_cv, &vca.vca_lock);
1515 }
1516 mutex_exit(&vca.vca_lock);
1517
1518 dmu_tx_t *tx =
1519 dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
1520
1521 VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
1522 uint64_t txg = dmu_tx_get_txg(tx);
1523
1524 /*
1525 * Reacquire the vdev_config lock. The vdev_t
1526 * that we're removing may have changed, e.g. due
1527 * to a vdev_attach or vdev_detach.
1528 */
1529 spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
1530 vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1531
1532 if (txg != last_txg)
1533 max_alloc = spa_remove_max_segment(spa);
1534 last_txg = txg;
1535
1536 spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);
1537
1538 dmu_tx_commit(tx);
1539 mutex_enter(&svr->svr_lock);
1540 }
1541
1542 mutex_enter(&vca.vca_lock);
1543 if (zfs_removal_ignore_errors == 0 &&
1544 (vca.vca_read_error_bytes > 0 ||
1545 vca.vca_write_error_bytes > 0)) {
1546 svr->svr_thread_exit = B_TRUE;
1547 }
1548 mutex_exit(&vca.vca_lock);
1549 }
1550
1551 mutex_exit(&svr->svr_lock);
1552
1553 spa_config_exit(spa, SCL_CONFIG, FTAG);
1554
1555 /*
1556 * Wait for all copies to finish before cleaning up the vca.
1557 */
1558 txg_wait_synced(spa->spa_dsl_pool, 0);
1559 ASSERT0(vca.vca_outstanding_bytes);
1560
1561 mutex_destroy(&vca.vca_lock);
1562 cv_destroy(&vca.vca_cv);
1563
1564 if (svr->svr_thread_exit) {
1565 mutex_enter(&svr->svr_lock);
1566 range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
1567 svr->svr_thread = NULL;
1568 cv_broadcast(&svr->svr_cv);
1569 mutex_exit(&svr->svr_lock);
1570
1571 /*
1572 * During the removal process an unrecoverable read or write
1573 * error was encountered. The removal process must be
1574 * cancelled or this damage may become permanent.
1575 */
1576 if (zfs_removal_ignore_errors == 0 &&
1577 (vca.vca_read_error_bytes > 0 ||
1578 vca.vca_write_error_bytes > 0)) {
1579 zfs_dbgmsg("canceling removal due to IO errors: "
1580 "[read_error_bytes=%llu] [write_error_bytes=%llu]",
1581 vca.vca_read_error_bytes,
1582 vca.vca_write_error_bytes);
1583 spa_vdev_remove_cancel_impl(spa);
1584 }
1585 } else {
1586 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1587 vdev_remove_complete(spa);
1588 }
1589 }
1590
1591 void
1592 spa_vdev_remove_suspend(spa_t *spa)
1593 {
1594 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1595
1596 if (svr == NULL)
1597 return;
1598
1599 mutex_enter(&svr->svr_lock);
1600 svr->svr_thread_exit = B_TRUE;
1601 while (svr->svr_thread != NULL)
1602 cv_wait(&svr->svr_cv, &svr->svr_lock);
1603 svr->svr_thread_exit = B_FALSE;
1604 mutex_exit(&svr->svr_lock);
1605 }
1606
1607 /* ARGSUSED */
1608 static int
1609 spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
1610 {
1611 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1612
1613 if (spa->spa_vdev_removal == NULL)
1614 return (ENOTACTIVE);
1615 return (0);
1616 }
1617
1618 /*
1619 * Cancel a removal by freeing all entries from the partial mapping
1620 * and marking the vdev as no longer being removing.
1621 */
1622 /* ARGSUSED */
1623 static void
1624 spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
1625 {
1626 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1627 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1628 vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
1629 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
1630 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
1631 objset_t *mos = spa->spa_meta_objset;
1632
1633 ASSERT3P(svr->svr_thread, ==, NULL);
1634
1635 spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
1636
1637 boolean_t are_precise;
1638 VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
1639 if (are_precise) {
1640 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1641 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1642 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
1643 }
1644
1645 uint64_t obsolete_sm_object;
1646 VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
1647 if (obsolete_sm_object != 0) {
1648 ASSERT(vd->vdev_obsolete_sm != NULL);
1649 ASSERT3U(obsolete_sm_object, ==,
1650 space_map_object(vd->vdev_obsolete_sm));
1651
1652 space_map_free(vd->vdev_obsolete_sm, tx);
1653 VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
1654 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
1655 space_map_close(vd->vdev_obsolete_sm);
1656 vd->vdev_obsolete_sm = NULL;
1657 spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
1658 }
1659 for (int i = 0; i < TXG_SIZE; i++) {
1660 ASSERT(list_is_empty(&svr->svr_new_segments[i]));
1661 ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
1662 vdev_indirect_mapping_max_offset(vim));
1663 }
1664
1665 for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
1666 metaslab_t *msp = vd->vdev_ms[msi];
1667
1668 if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
1669 break;
1670
1671 ASSERT0(range_tree_space(svr->svr_allocd_segs));
1672
1673 mutex_enter(&msp->ms_lock);
1674
1675 /*
1676 * Assert nothing in flight -- the metaslab's range trees are empty.
1677 */
1678 for (int i = 0; i < TXG_SIZE; i++)
1679 ASSERT0(range_tree_space(msp->ms_allocating[i]));
1680 for (int i = 0; i < TXG_DEFER_SIZE; i++)
1681 ASSERT0(range_tree_space(msp->ms_defer[i]));
1682 ASSERT0(range_tree_space(msp->ms_freed));
1683
1684 if (msp->ms_sm != NULL) {
1685 mutex_enter(&svr->svr_lock);
1686 VERIFY0(space_map_load(msp->ms_sm,
1687 svr->svr_allocd_segs, SM_ALLOC));
1688 range_tree_walk(msp->ms_freeing,
1689 range_tree_remove, svr->svr_allocd_segs);
1690
1691 /*
1692 * Clear everything past what has been synced,
1693 * because we have not allocated mappings for it yet.
1694 */
1695 uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
1696 uint64_t sm_end = msp->ms_sm->sm_start +
1697 msp->ms_sm->sm_size;
1698 if (sm_end > syncd)
1699 range_tree_clear(svr->svr_allocd_segs,
1700 syncd, sm_end - syncd);
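
/*
 * Worked example (illustrative numbers): if this space map
 * covers [1G, 2G) but mappings have been synced only for
 * offsets below syncd = 1.5G, the range [1.5G, 2G) is cleared
 * here; those segments were never copied, so their original
 * locations remain valid and nothing must be freed from a new
 * location.
 */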
1701
1702 mutex_exit(&svr->svr_lock);
1703 }
1704 mutex_exit(&msp->ms_lock);
1705
1706 mutex_enter(&svr->svr_lock);
1707 range_tree_vacate(svr->svr_allocd_segs,
1708 free_mapped_segment_cb, vd);
1709 mutex_exit(&svr->svr_lock);
1710 }
1711
1712 /*
1713 * Note: this must happen after we invoke free_mapped_segment_cb,
1714 * because it adds to the obsolete_segments.
1715 */
1716 range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
1717
1718 ASSERT3U(vic->vic_mapping_object, ==,
1719 vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
1720 vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
1721 vd->vdev_indirect_mapping = NULL;
1722 vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
1723 vic->vic_mapping_object = 0;
1724
1725 ASSERT3U(vic->vic_births_object, ==,
1726 vdev_indirect_births_object(vd->vdev_indirect_births));
1727 vdev_indirect_births_close(vd->vdev_indirect_births);
1728 vd->vdev_indirect_births = NULL;
1729 vdev_indirect_births_free(mos, vic->vic_births_object, tx);
1730 vic->vic_births_object = 0;
1731
1732 /*
1733 * We may have processed some frees from the removing vdev in this
1734 * txg, thus increasing svr_bytes_done; discard that here to
1735 * satisfy the assertions in spa_vdev_removal_destroy().
1736 * Note that future txgs cannot have any bytes_done, because
1737 * future txgs are only modified from open context, and we have
1738 * already shut down the copying thread.
1739 */
1740 svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
1741 spa_finish_removal(spa, DSS_CANCELED, tx);
1742
1743 vd->vdev_removing = B_FALSE;
1744 vdev_config_dirty(vd);
1745
1746 zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
1747 vd->vdev_id, dmu_tx_get_txg(tx));
1748 spa_history_log_internal(spa, "vdev remove canceled", tx,
1749 "%s vdev %llu %s", spa_name(spa),
1750 vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
1751 }
1752
1753 static int
1754 spa_vdev_remove_cancel_impl(spa_t *spa)
1755 {
1756 uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;
1757
1758 int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
1759 spa_vdev_remove_cancel_sync, NULL, 0,
1760 ZFS_SPACE_CHECK_EXTRA_RESERVED);
1761
1762 if (error == 0) {
1763 spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
1764 vdev_t *vd = vdev_lookup_top(spa, vdid);
1765 metaslab_group_activate(vd->vdev_mg);
1766 spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
1767 }
1768
1769 return (error);
1770 }
1771
1772 int
1773 spa_vdev_remove_cancel(spa_t *spa)
1774 {
1775 spa_vdev_remove_suspend(spa);
1776
1777 if (spa->spa_vdev_removal == NULL)
1778 return (SET_ERROR(ENOTACTIVE));
1779
1780 return (spa_vdev_remove_cancel_impl(spa));
1781 }
1782
1783 void
1784 svr_sync(spa_t *spa, dmu_tx_t *tx)
1785 {
1786 spa_vdev_removal_t *svr = spa->spa_vdev_removal;
1787 int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
1788
1789 if (svr == NULL)
1790 return;
1791
1792 /*
1793 * This check is necessary so that we do not dirty the pool
1794 * directory object (DMU_POOL_DIRECTORY_OBJECT) via
1795 * spa_sync_removing_state() when there is nothing to do.
1796 * Dirtying it every time would prevent syncing-to-convergence.
1797 */
1798 if (svr->svr_bytes_done[txgoff] == 0)
1799 return;
1800
1801 /*
1802 * Update progress accounting.
1803 */
1804 spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
1805 svr->svr_bytes_done[txgoff] = 0;
1806
1807 spa_sync_removing_state(spa, tx);
1808 }
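
/*
 * The bytes folded into sr_copied above are what
 * spa_removal_get_stats() later reports as prs_copied, from which
 * userland (e.g. "zpool status") derives evacuation progress.
 */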
1809
1810 static void
1811 vdev_remove_make_hole_and_free(vdev_t *vd)
1812 {
1813 uint64_t id = vd->vdev_id;
1814 spa_t *spa = vd->vdev_spa;
1815 vdev_t *rvd = spa->spa_root_vdev;
1816 boolean_t last_vdev = (id == (rvd->vdev_children - 1));
1817
1818 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1819 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1820
1821 vdev_free(vd);
1822
1823 if (last_vdev) {
1824 vdev_compact_children(rvd);
1825 } else {
1826 vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
1827 vdev_add_child(rvd, vd);
1828 }
1829 vdev_config_dirty(rvd);
1830
1831 /*
1832 * Reassess the health of our root vdev.
1833 */
1834 vdev_reopen(rvd);
1835 }
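
/*
 * Example of why the hole is needed: in a pool whose root vdev has
 * children 0..3, removing child 1 must not shift children 2 and 3,
 * because DVAs embed the vdev id. A hole vdev therefore keeps slot 1
 * occupied; only when the last child is removed can the array shrink
 * (via vdev_compact_children() above).
 */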
1836
1837 /*
1838 * Remove a log device. The config lock is held for the specified TXG.
1839 */
1840 static int
1841 spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
1842 {
1843 metaslab_group_t *mg = vd->vdev_mg;
1844 spa_t *spa = vd->vdev_spa;
1845 int error = 0;
1846
1847 ASSERT(vd->vdev_islog);
1848 ASSERT(vd == vd->vdev_top);
1849 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1850
1851 /*
1852 * Stop allocating from this vdev.
1853 */
1854 metaslab_group_passivate(mg);
1855
1856 /*
1857 * Wait for the youngest allocations and frees to sync,
1858 * and then wait for the deferral of those frees to finish.
1859 */
1860 spa_vdev_config_exit(spa, NULL,
1861 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
1862
1863 /*
1864 * Evacuate the device. We don't hold the config lock as
1865 * writer since we need to do I/O but we do keep the
1866 * spa_namespace_lock held. Once this completes the device
1867 * should no longer have any blocks allocated on it.
1868 */
1869 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1870 if (vd->vdev_stat.vs_alloc != 0)
1871 error = spa_reset_logs(spa);
1872
1873 *txg = spa_vdev_config_enter(spa);
1874
1875 if (error != 0) {
1876 metaslab_group_activate(mg);
1877 return (error);
1878 }
1879 ASSERT0(vd->vdev_stat.vs_alloc);
1880
1881 /*
1882 * The evacuation succeeded. Remove any remaining MOS metadata
1883 * associated with this vdev, and wait for these changes to sync.
1884 */
1885 vd->vdev_removing = B_TRUE;
1886
1887 vdev_dirty_leaves(vd, VDD_DTL, *txg);
1888 vdev_config_dirty(vd);
1889
1890 vdev_metaslab_fini(vd);
1891
1892 spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
1893
1894 /* Stop initializing and TRIM */
1895 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
1896 vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
1897 vdev_autotrim_stop_wait(vd);
1898
1899 *txg = spa_vdev_config_enter(spa);
1900
1901 sysevent_t *ev = spa_event_create(spa, vd, NULL,
1902 ESC_ZFS_VDEV_REMOVE_DEV);
1903 ASSERT(MUTEX_HELD(&spa_namespace_lock));
1904 ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
1905
1906 /* The top ZAP should have been destroyed by vdev_remove_empty. */
1907 ASSERT0(vd->vdev_top_zap);
1908 /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
1909 ASSERT0(vd->vdev_leaf_zap);
1910
1911 (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
1912
1913 if (list_link_active(&vd->vdev_state_dirty_node))
1914 vdev_state_clean(vd);
1915 if (list_link_active(&vd->vdev_config_dirty_node))
1916 vdev_config_clean(vd);
1917
1918 ASSERT0(vd->vdev_stat.vs_alloc);
1919
1920 /*
1921 * Clean up the vdev namespace.
1922 */
1923 vdev_remove_make_hole_and_free(vd);
1924
1925 if (ev != NULL)
1926 spa_event_post(ev);
1927
1928 return (0);
1929 }
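
/*
 * Unlike top-level removal, log removal does not copy allocated
 * segments: spa_reset_logs() above forces the intent logs to be
 * rewritten on other vdevs, after which the (now empty) log vdev is
 * simply replaced with a hole.
 */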
1930
1931 static int
1932 spa_vdev_remove_top_check(vdev_t *vd)
1933 {
1934 spa_t *spa = vd->vdev_spa;
1935
1936 if (vd != vd->vdev_top)
1937 return (SET_ERROR(ENOTSUP));
1938
1939 if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
1940 return (SET_ERROR(ENOTSUP));
1941
1942 /* available space in the pool's normal class */
1943 uint64_t available = dsl_dir_space_available(
1944 spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);
1945
1946 metaslab_class_t *mc = vd->vdev_mg->mg_class;
1947
1948 /*
1949 * When removing a vdev from an allocation class that has
1950 * remaining vdevs, include available space from the class.
1951 */
1952 if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
1953 uint64_t class_avail = metaslab_class_get_space(mc) -
1954 metaslab_class_get_alloc(mc);
1955
1956 /* add class space, adjusted for overhead */
1957 available += (class_avail * 94) / 100;
1958 }
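
/*
 * For example (illustrative numbers): a special allocation class
 * with 200G of space and 50G allocated would contribute
 * (150G * 94) / 100 = 141G toward "available", with the remaining
 * 6% reserved as estimated overhead.
 */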
1959
1960 /*
1961 * There has to be enough free space to remove the
1962 * device and leave double the "slop" space (i.e. we
1963 * must leave at least 3% of the pool free, in addition to
1964 * the normal slop space).
1965 */
1966 if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
1967 return (SET_ERROR(ENOSPC));
1968 }
1969
1970 /*
1971 * There can not be a removal in progress.
1972 */
1973 if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
1974 return (SET_ERROR(EBUSY));
1975
1976 /*
1977 * The device must have all its data.
1978 */
1979 if (!vdev_dtl_empty(vd, DTL_MISSING) ||
1980 !vdev_dtl_empty(vd, DTL_OUTAGE))
1981 return (SET_ERROR(EBUSY));
1982
1983 /*
1984 * The device must be healthy.
1985 */
1986 if (!vdev_readable(vd))
1987 return (SET_ERROR(EIO));
1988
1989 /*
1990 * All vdevs in normal class must have the same ashift.
1991 */
1992 if (spa->spa_max_ashift != spa->spa_min_ashift) {
1993 return (SET_ERROR(EINVAL));
1994 }
1995
1996 /*
1997 * Check each top-level vdev: assert ashift consistency, reject
1998 * raidz, and require any mirror to consist of leaf vdevs only.
1999 */
2000 vdev_t *rvd = spa->spa_root_vdev;
2001 int num_indirect = 0;
2002 for (uint64_t id = 0; id < rvd->vdev_children; id++) {
2003 vdev_t *cvd = rvd->vdev_child[id];
2004 if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
2005 ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
2006 if (cvd->vdev_ops == &vdev_indirect_ops)
2007 num_indirect++;
2008 if (!vdev_is_concrete(cvd))
2009 continue;
2010 if (cvd->vdev_ops == &vdev_raidz_ops)
2011 return (SET_ERROR(EINVAL));
2012 /*
2013 * A mirror must be composed of leaf vdevs only.
2014 */
2015 if (cvd->vdev_ops == &vdev_mirror_ops) {
2016 for (uint64_t cid = 0;
2017 cid < cvd->vdev_children; cid++) {
2018 if (!cvd->vdev_child[cid]->vdev_ops->
2019 vdev_op_leaf)
2020 return (SET_ERROR(EINVAL));
2021 }
2022 }
2023 }
2024
2025 return (0);
2026 }
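
/*
 * In short: every concrete top-level vdev must be a plain device or
 * a mirror of leaf vdevs, all with the same ashift. For example, a
 * pool of equal-ashift two-way mirrors passes this check, while any
 * raidz top-level vdev fails it with EINVAL.
 */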
2027
2028 /*
2029 * Initiate removal of a top-level vdev, reducing the total space in the pool.
2030 * The config lock is held for the specified TXG. Once initiated,
2031 * evacuation of all allocated space (copying it to other vdevs) happens
2032 * in the background (see spa_vdev_remove_thread()), and can be canceled
2033 * (see spa_vdev_remove_cancel()). If successful, the vdev will
2034 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
2035 */
2036 static int
2037 spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
2038 {
2039 spa_t *spa = vd->vdev_spa;
2040 int error;
2041
2042 /*
2043 * Check for errors up-front, so that we don't waste time
2044 * passivating the metaslab group and clearing the ZIL if there
2045 * are errors.
2046 */
2047 error = spa_vdev_remove_top_check(vd);
2048 if (error != 0)
2049 return (error);
2050
2051 /*
2052 * Stop allocating from this vdev. Note that we must check
2053 * that this is not the only device in the pool before
2054 * passivating, otherwise we will not be able to make
2055 * progress because we can't allocate from any vdevs.
2056 * The above check for sufficient free space serves this
2057 * purpose.
2058 */
2059 metaslab_group_t *mg = vd->vdev_mg;
2060 metaslab_group_passivate(mg);
2061
2062 /*
2063 * Wait for the youngest allocations and frees to sync,
2064 * and then wait for the deferral of those frees to finish.
2065 */
2066 spa_vdev_config_exit(spa, NULL,
2067 *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
2068
2069 /*
2070 * We must ensure that no "stubby" log blocks are allocated
2071 * on the device to be removed. These blocks could be
2072 * written at any time, including while we are in the middle
2073 * of copying them.
2074 */
2075 error = spa_reset_logs(spa);
2076
2077 /*
2078 * We stop any initializing and TRIM that is currently in progress
2079 * but leave the state as "active". This will allow the process to
2080 * resume if the removal is canceled sometime later.
2081 */
2082 vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
2083 vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
2084 vdev_autotrim_stop_wait(vd);
2085
2086 *txg = spa_vdev_config_enter(spa);
2087
2088 /*
2089 * Things might have changed while the config lock was dropped
2090 * (e.g. space usage). Check for errors again.
2091 */
2092 if (error == 0)
2093 error = spa_vdev_remove_top_check(vd);
2094
2095 if (error != 0) {
2096 metaslab_group_activate(mg);
2097 spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
2098 spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
2099 spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
2100 return (error);
2101 }
2102
2103 vd->vdev_removing = B_TRUE;
2104
2105 vdev_dirty_leaves(vd, VDD_DTL, *txg);
2106 vdev_config_dirty(vd);
2107 dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
2108 dsl_sync_task_nowait(spa->spa_dsl_pool,
2109 vdev_remove_initiate_sync,
2110 (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
2111 dmu_tx_commit(tx);
2112
2113 return (0);
2114 }
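
/*
 * Note on the hand-off above: vdev_remove_initiate_sync() is queued
 * with dsl_sync_task_nowait() against the txg we already hold, so the
 * "removing" state is persisted in the same txg that dirtied the
 * config, and the evacuation described at the top of this file then
 * proceeds in the background.
 */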
2115
2116 /*
2117 * Remove a device from the pool.
2118 *
2119 * Removing a device from the vdev namespace requires several steps
2120 * and can take a significant amount of time. As a result we use
2121 * the spa_vdev_config_[enter/exit] functions which allow us to
2122 * grab and release the spa_config_lock while still holding the namespace
2123 * lock. During each step the configuration is synced out.
2124 */
2125 int
2126 spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
2127 {
2128 vdev_t *vd;
2129 nvlist_t **spares, **l2cache, *nv;
2130 uint64_t txg = 0;
2131 uint_t nspares, nl2cache;
2132 int error = 0, error_log;
2133 boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
2134 sysevent_t *ev = NULL;
2135 char *vd_type = NULL, *vd_path = NULL, *vd_path_log = NULL;
2136
2137 ASSERT(spa_writeable(spa));
2138
2139 if (!locked)
2140 txg = spa_vdev_enter(spa);
2141
2142 ASSERT(MUTEX_HELD(&spa_namespace_lock));
2143 if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
2144 error = (spa_has_checkpoint(spa)) ?
2145 ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
2146
2147 if (!locked)
2148 return (spa_vdev_exit(spa, NULL, txg, error));
2149
2150 return (error);
2151 }
2152
2153 vd = spa_lookup_by_guid(spa, guid, B_FALSE);
2154
2155 if (spa->spa_spares.sav_vdevs != NULL &&
2156 nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
2157 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
2158 (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
2159 /*
2160 * Only remove the hot spare if it's not currently in use
2161 * in this pool.
2162 */
2163 if (vd == NULL || unspare) {
2164 if (vd == NULL)
2165 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
2166 ev = spa_event_create(spa, vd, NULL,
2167 ESC_ZFS_VDEV_REMOVE_AUX);
2168
2169 vd_type = VDEV_TYPE_SPARE;
2170 vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
2171 spa_vdev_remove_aux(spa->spa_spares.sav_config,
2172 ZPOOL_CONFIG_SPARES, spares, nspares, nv);
2173 spa_load_spares(spa);
2174 spa->spa_spares.sav_sync = B_TRUE;
2175 } else {
2176 error = SET_ERROR(EBUSY);
2177 }
2178 } else if (spa->spa_l2cache.sav_vdevs != NULL &&
2179 nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
2180 ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
2181 (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
2182 vd_type = VDEV_TYPE_L2CACHE;
2183 vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
2184 /*
2185 * Cache devices can always be removed.
2186 */
2187 vd = spa_lookup_by_guid(spa, guid, B_TRUE);
2188 ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
2189 spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
2190 ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
2191 spa_load_l2cache(spa);
2192 spa->spa_l2cache.sav_sync = B_TRUE;
2193 } else if (vd != NULL && vd->vdev_islog) {
2194 ASSERT(!locked);
2195 vd_type = VDEV_TYPE_LOG;
2196 vd_path = (vd->vdev_path != NULL) ? vd->vdev_path : "-";
2197 error = spa_vdev_remove_log(vd, &txg);
2198 } else if (vd != NULL) {
2199 ASSERT(!locked);
2200 error = spa_vdev_remove_top(vd, &txg);
2201 } else {
2202 /*
2203 * There is no vdev of any kind with the specified guid.
2204 */
2205 error = SET_ERROR(ENOENT);
2206 }
2207
2208 if (vd_path != NULL)
2209 vd_path_log = spa_strdup(vd_path);
2210
2211 error_log = error;
2212
2213 if (!locked)
2214 error = spa_vdev_exit(spa, NULL, txg, error);
2215
2216 /*
2217 * Logging must be done outside the spa config lock. Otherwise,
2218 * this code path could end up holding the spa config lock while
2219 * waiting for a txg_sync so it can write to the internal log.
2220 * Doing that would prevent the txg sync from actually happening,
2221 * causing a deadlock.
2222 */
2223 if (error_log == 0 && vd_type != NULL && vd_path_log != NULL) {
2224 spa_history_log_internal(spa, "vdev remove", NULL,
2225 "%s vdev (%s) %s", spa_name(spa), vd_type, vd_path_log);
2226 }
2227 if (vd_path_log != NULL)
2228 spa_strfree(vd_path_log);
2229
2230 if (ev != NULL)
2231 spa_event_post(ev);
2232
2233 return (error);
2234 }
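
/*
 * Illustrative call path (a sketch): "zpool remove <pool> <device>"
 * reaches this function through the ZFS_IOC_VDEV_REMOVE ioctl with
 * the device's guid and unspare == B_FALSE; unspare is B_TRUE only
 * for internal callers (e.g. spa_vdev_detach()) that must pull a hot
 * spare which is still marked in use.
 */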
2235
2236 int
2237 spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
2238 {
2239 prs->prs_state = spa->spa_removing_phys.sr_state;
2240
2241 if (prs->prs_state == DSS_NONE)
2242 return (SET_ERROR(ENOENT));
2243
2244 prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
2245 prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
2246 prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
2247 prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
2248 prs->prs_copied = spa->spa_removing_phys.sr_copied;
2249
2250 prs->prs_mapping_memory = 0;
2251 uint64_t indirect_vdev_id =
2252 spa->spa_removing_phys.sr_prev_indirect_vdev;
2253 while (indirect_vdev_id != -1) {
2254 vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
2255 vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
2256 vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
2257
2258 ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
2259 prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
2260 indirect_vdev_id = vic->vic_prev_indirect_vdev;
2261 }
2262
2263 return (0);
2264 }
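
/*
 * Example of the walk above: if top-level vdevs 4 and then 2 were
 * removed, sr_prev_indirect_vdev is 2, vdev 2's
 * vic_prev_indirect_vdev is 4, and vdev 4's is -1, ending the loop;
 * prs_mapping_memory is then the total in-core size of both
 * indirect mappings.
 */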
2265
2266 #if defined(_KERNEL)
2267 module_param(zfs_removal_ignore_errors, int, 0644);
2268 MODULE_PARM_DESC(zfs_removal_ignore_errors,
2269 "Ignore hard IO errors when removing device");
2270
2271 module_param(zfs_remove_max_segment, int, 0644);
2272 MODULE_PARM_DESC(zfs_remove_max_segment,
2273 "Largest contiguous segment to allocate when removing device");
2274
2275 module_param(vdev_removal_max_span, int, 0644);
2276 MODULE_PARM_DESC(vdev_removal_max_span,
2277 "Largest span of free chunks a remap segment can span");
2278
2279 /* BEGIN CSTYLED */
2280 module_param(zfs_removal_suspend_progress, int, 0644);
2281 MODULE_PARM_DESC(zfs_removal_suspend_progress,
2282 "Pause device removal after this many bytes are copied "
2283 "(debug use only - causes removal to hang)");
2284 /* END CSTYLED */
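
/*
 * These parameters are writable at runtime; e.g. (illustrative, path
 * per the standard Linux module sysfs layout):
 *	echo 1 > /sys/module/zfs/parameters/zfs_removal_ignore_errors
 * lets an in-progress removal continue past hard IO errors instead
 * of being canceled (see the error handling at the end of the
 * removal thread above).
 */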
2285
2286 EXPORT_SYMBOL(free_from_removing_vdev);
2287 EXPORT_SYMBOL(spa_removal_get_stats);
2288 EXPORT_SYMBOL(spa_remove_init);
2289 EXPORT_SYMBOL(spa_restart_removal);
2290 EXPORT_SYMBOL(spa_vdev_removal_destroy);
2291 EXPORT_SYMBOL(spa_vdev_remove);
2292 EXPORT_SYMBOL(spa_vdev_remove_cancel);
2293 EXPORT_SYMBOL(spa_vdev_remove_suspend);
2294 EXPORT_SYMBOL(svr_sync);
2295 #endif