/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2019 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_trim.h>
#include <sys/trace_defs.h>
/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool.  Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz or mirrors.  (Note that members of a mirror can be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev.  The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */
typedef struct vdev_copy_arg {
	uint64_t	vca_outstanding_bytes;
	uint64_t	vca_read_error_bytes;
	uint64_t	vca_write_error_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;
/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal.  This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;
/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device.  This can be no larger than SPA_MAXBLOCKSIZE.  If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 *
 * See also the accessor function spa_remove_max_segment().
 */
int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
/*
 * Ignore hard IO errors during device removal.  When set, if a device
 * encounters a hard IO error during the removal process the removal will
 * not be cancelled.  This can result in a normally recoverable block
 * becoming permanently damaged and is not recommended.
 */
int zfs_removal_ignore_errors = 0;
/*
 * Allow a remap segment to span free chunks of at most this size. The main
 * impact of a larger span is that we will read and write larger, more
 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
 * for iops.  The value here was chosen to align with
 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
 * reads (but there's no reason it has to be the same).
 *
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
 */
int vdev_removal_max_span = 32 * 1024;
/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
int zfs_removal_suspend_progress = 0;

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"
static void spa_vdev_remove_thread(void *arg);
static int spa_vdev_remove_cancel_impl(spa_t *spa);
static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}
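
/*
 * Find the index of the nvlist in the nvpp array (e.g. the pool's spare or
 * l2cache device lists) whose ZPOOL_CONFIG_GUID matches target_guid.
 */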
static int
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (i);
	}

	return (-1);
}
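
/*
 * Remove dev_to_remove from the nvlist array "name" (e.g. the spares or
 * l2cache array) in config by rebuilding the array without that entry.
 */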
static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}
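
/*
 * Allocate and initialize the in-core state used to track an active
 * removal of the given top-level vdev.
 */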
static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, NULL);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, NULL);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}
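
/*
 * Free the in-core removal state.  All per-txg accounting must already
 * have been cleared.
 */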
static void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}
/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
241 vdev_remove_initiate_sync(void *arg
, dmu_tx_t
*tx
)
243 int vdev_id
= (uintptr_t)arg
;
244 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
245 vdev_t
*vd
= vdev_lookup_top(spa
, vdev_id
);
246 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
247 objset_t
*mos
= spa
->spa_dsl_pool
->dp_meta_objset
;
248 spa_vdev_removal_t
*svr
= NULL
;
249 ASSERTV(uint64_t txg
= dmu_tx_get_txg(tx
));
251 ASSERT3P(vd
->vdev_ops
, !=, &vdev_raidz_ops
);
252 svr
= spa_vdev_removal_create(vd
);
254 ASSERT(vd
->vdev_removing
);
255 ASSERT3P(vd
->vdev_indirect_mapping
, ==, NULL
);
257 spa_feature_incr(spa
, SPA_FEATURE_DEVICE_REMOVAL
, tx
);
258 if (spa_feature_is_enabled(spa
, SPA_FEATURE_OBSOLETE_COUNTS
)) {
260 * By activating the OBSOLETE_COUNTS feature, we prevent
261 * the pool from being downgraded and ensure that the
262 * refcounts are precise.
264 spa_feature_incr(spa
, SPA_FEATURE_OBSOLETE_COUNTS
, tx
);
266 VERIFY0(zap_add(spa
->spa_meta_objset
, vd
->vdev_top_zap
,
267 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE
, sizeof (one
), 1,
269 ASSERTV(boolean_t are_precise
);
270 ASSERT0(vdev_obsolete_counts_are_precise(vd
, &are_precise
));
271 ASSERT3B(are_precise
, ==, B_TRUE
);
274 vic
->vic_mapping_object
= vdev_indirect_mapping_alloc(mos
, tx
);
275 vd
->vdev_indirect_mapping
=
276 vdev_indirect_mapping_open(mos
, vic
->vic_mapping_object
);
277 vic
->vic_births_object
= vdev_indirect_births_alloc(mos
, tx
);
278 vd
->vdev_indirect_births
=
279 vdev_indirect_births_open(mos
, vic
->vic_births_object
);
280 spa
->spa_removing_phys
.sr_removing_vdev
= vd
->vdev_id
;
281 spa
->spa_removing_phys
.sr_start_time
= gethrestime_sec();
282 spa
->spa_removing_phys
.sr_end_time
= 0;
283 spa
->spa_removing_phys
.sr_state
= DSS_SCANNING
;
284 spa
->spa_removing_phys
.sr_to_copy
= 0;
285 spa
->spa_removing_phys
.sr_copied
= 0;
288 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
289 * there may be space in the defer tree, which is free, but still
290 * counted in vs_alloc.
292 for (uint64_t i
= 0; i
< vd
->vdev_ms_count
; i
++) {
293 metaslab_t
*ms
= vd
->vdev_ms
[i
];
294 if (ms
->ms_sm
== NULL
)
297 spa
->spa_removing_phys
.sr_to_copy
+=
298 metaslab_allocated_space(ms
);
301 * Space which we are freeing this txg does not need to
304 spa
->spa_removing_phys
.sr_to_copy
-=
305 range_tree_space(ms
->ms_freeing
);
307 ASSERT0(range_tree_space(ms
->ms_freed
));
308 for (int t
= 0; t
< TXG_SIZE
; t
++)
309 ASSERT0(range_tree_space(ms
->ms_allocating
[t
]));
313 * Sync tasks are called before metaslab_sync(), so there should
314 * be no already-synced metaslabs in the TXG_CLEAN list.
316 ASSERT3P(txg_list_head(&vd
->vdev_ms_list
, TXG_CLEAN(txg
)), ==, NULL
);
318 spa_sync_removing_state(spa
, tx
);
321 * All blocks that we need to read the most recent mapping must be
322 * stored on concrete vdevs. Therefore, we must dirty anything that
323 * is read before spa_remove_init(). Specifically, the
324 * spa_config_object. (Note that although we already modified the
325 * spa_config_object in spa_sync_removing_state, that may not have
326 * modified all blocks of the object.)
328 dmu_object_info_t doi
;
329 VERIFY0(dmu_object_info(mos
, DMU_POOL_DIRECTORY_OBJECT
, &doi
));
330 for (uint64_t offset
= 0; offset
< doi
.doi_max_offset
; ) {
332 VERIFY0(dmu_buf_hold(mos
, DMU_POOL_DIRECTORY_OBJECT
,
333 offset
, FTAG
, &dbuf
, 0));
334 dmu_buf_will_dirty(dbuf
, tx
);
335 offset
+= dbuf
->db_size
;
336 dmu_buf_rele(dbuf
, FTAG
);
340 * Now that we've allocated the im_object, dirty the vdev to ensure
341 * that the object gets written to the config on disk.
343 vdev_config_dirty(vd
);
345 zfs_dbgmsg("starting removal thread for vdev %llu (%px) in txg %llu "
346 "im_obj=%llu", vd
->vdev_id
, vd
, dmu_tx_get_txg(tx
),
347 vic
->vic_mapping_object
);
349 spa_history_log_internal(spa
, "vdev remove started", tx
,
350 "%s vdev %llu %s", spa_name(spa
), (u_longlong_t
)vd
->vdev_id
,
351 (vd
->vdev_path
!= NULL
) ? vd
->vdev_path
: "-");
353 * Setting spa_vdev_removal causes subsequent frees to call
354 * free_from_removing_vdev(). Note that we don't need any locking
355 * because we are the sync thread, and metaslab_free_impl() is only
356 * called from syncing context (potentially from a zio taskq thread,
357 * but in any case only when there are outstanding free i/os, which
360 ASSERT3P(spa
->spa_vdev_removal
, ==, NULL
);
361 spa
->spa_vdev_removal
= svr
;
362 svr
->svr_thread
= thread_create(NULL
, 0,
363 spa_vdev_remove_thread
, spa
, 0, &p0
, TS_RUN
, minclsyspri
);
/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed.  We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
375 spa_remove_init(spa_t
*spa
)
379 error
= zap_lookup(spa
->spa_dsl_pool
->dp_meta_objset
,
380 DMU_POOL_DIRECTORY_OBJECT
,
381 DMU_POOL_REMOVING
, sizeof (uint64_t),
382 sizeof (spa
->spa_removing_phys
) / sizeof (uint64_t),
383 &spa
->spa_removing_phys
);
385 if (error
== ENOENT
) {
386 spa
->spa_removing_phys
.sr_state
= DSS_NONE
;
387 spa
->spa_removing_phys
.sr_removing_vdev
= -1;
388 spa
->spa_removing_phys
.sr_prev_indirect_vdev
= -1;
389 spa
->spa_indirect_vdevs_loaded
= B_TRUE
;
391 } else if (error
!= 0) {
395 if (spa
->spa_removing_phys
.sr_state
== DSS_SCANNING
) {
397 * We are currently removing a vdev. Create and
398 * initialize a spa_vdev_removal_t from the bonus
399 * buffer of the removing vdevs vdev_im_object, and
400 * initialize its partial mapping.
402 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
403 vdev_t
*vd
= vdev_lookup_top(spa
,
404 spa
->spa_removing_phys
.sr_removing_vdev
);
407 spa_config_exit(spa
, SCL_STATE
, FTAG
);
411 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
413 ASSERT(vdev_is_concrete(vd
));
414 spa_vdev_removal_t
*svr
= spa_vdev_removal_create(vd
);
415 ASSERT3U(svr
->svr_vdev_id
, ==, vd
->vdev_id
);
416 ASSERT(vd
->vdev_removing
);
418 vd
->vdev_indirect_mapping
= vdev_indirect_mapping_open(
419 spa
->spa_meta_objset
, vic
->vic_mapping_object
);
420 vd
->vdev_indirect_births
= vdev_indirect_births_open(
421 spa
->spa_meta_objset
, vic
->vic_births_object
);
422 spa_config_exit(spa
, SCL_STATE
, FTAG
);
424 spa
->spa_vdev_removal
= svr
;
427 spa_config_enter(spa
, SCL_STATE
, FTAG
, RW_READER
);
428 uint64_t indirect_vdev_id
=
429 spa
->spa_removing_phys
.sr_prev_indirect_vdev
;
430 while (indirect_vdev_id
!= UINT64_MAX
) {
431 vdev_t
*vd
= vdev_lookup_top(spa
, indirect_vdev_id
);
432 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
434 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
435 vd
->vdev_indirect_mapping
= vdev_indirect_mapping_open(
436 spa
->spa_meta_objset
, vic
->vic_mapping_object
);
437 vd
->vdev_indirect_births
= vdev_indirect_births_open(
438 spa
->spa_meta_objset
, vic
->vic_births_object
);
440 indirect_vdev_id
= vic
->vic_prev_indirect_vdev
;
442 spa_config_exit(spa
, SCL_STATE
, FTAG
);
445 * Now that we've loaded all the indirect mappings, we can allow
446 * reads from other blocks (e.g. via predictive prefetch).
448 spa
->spa_indirect_vdevs_loaded
= B_TRUE
;
453 spa_restart_removal(spa_t
*spa
)
455 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
	/*
	 * In general when this function is called there is no
	 * removal thread running. The only scenario where this
	 * is not true is during spa_import() where this function
	 * is called twice [once from spa_import_impl() and
	 * spa_async_resume()]. Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
469 if (svr
->svr_thread
!= NULL
)
472 if (!spa_writeable(spa
))
475 zfs_dbgmsg("restarting removal of %llu", svr
->svr_vdev_id
);
476 svr
->svr_thread
= thread_create(NULL
, 0, spa_vdev_remove_thread
, spa
,
477 0, &p0
, TS_RUN
, minclsyspri
);
/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
486 free_from_removing_vdev(vdev_t
*vd
, uint64_t offset
, uint64_t size
)
488 spa_t
*spa
= vd
->vdev_spa
;
489 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
490 vdev_indirect_mapping_t
*vim
= vd
->vdev_indirect_mapping
;
491 uint64_t txg
= spa_syncing_txg(spa
);
492 uint64_t max_offset_yet
= 0;
494 ASSERT(vd
->vdev_indirect_config
.vic_mapping_object
!= 0);
495 ASSERT3U(vd
->vdev_indirect_config
.vic_mapping_object
, ==,
496 vdev_indirect_mapping_object(vim
));
497 ASSERT3U(vd
->vdev_id
, ==, svr
->svr_vdev_id
);
499 mutex_enter(&svr
->svr_lock
);
502 * Remove the segment from the removing vdev's spacemap. This
503 * ensures that we will not attempt to copy this space (if the
504 * removal thread has not yet visited it), and also ensures
505 * that we know what is actually allocated on the new vdevs
506 * (needed if we cancel the removal).
508 * Note: we must do the metaslab_free_concrete() with the svr_lock
509 * held, so that the remove_thread can not load this metaslab and then
510 * visit this offset between the time that we metaslab_free_concrete()
511 * and when we check to see if it has been visited.
513 * Note: The checkpoint flag is set to false as having/taking
514 * a checkpoint and removing a device can't happen at the same
517 ASSERT(!spa_has_checkpoint(spa
));
518 metaslab_free_concrete(vd
, offset
, size
, B_FALSE
);
520 uint64_t synced_size
= 0;
521 uint64_t synced_offset
= 0;
522 uint64_t max_offset_synced
= vdev_indirect_mapping_max_offset(vim
);
523 if (offset
< max_offset_synced
) {
525 * The mapping for this offset is already on disk.
526 * Free from the new location.
528 * Note that we use svr_max_synced_offset because it is
529 * updated atomically with respect to the in-core mapping.
530 * By contrast, vim_max_offset is not.
532 * This block may be split between a synced entry and an
533 * in-flight or unvisited entry. Only process the synced
534 * portion of it here.
536 synced_size
= MIN(size
, max_offset_synced
- offset
);
537 synced_offset
= offset
;
539 ASSERT3U(max_offset_yet
, <=, max_offset_synced
);
540 max_offset_yet
= max_offset_synced
;
542 DTRACE_PROBE3(remove__free__synced
,
545 uint64_t, synced_size
);
548 offset
+= synced_size
;
552 * Look at all in-flight txgs starting from the currently syncing one
553 * and see if a section of this free is being copied. By starting from
554 * this txg and iterating forward, we might find that this region
555 * was copied in two different txgs and handle it appropriately.
557 for (int i
= 0; i
< TXG_CONCURRENT_STATES
; i
++) {
558 int txgoff
= (txg
+ i
) & TXG_MASK
;
559 if (size
> 0 && offset
< svr
->svr_max_offset_to_sync
[txgoff
]) {
561 * The mapping for this offset is in flight, and
562 * will be synced in txg+i.
564 uint64_t inflight_size
= MIN(size
,
565 svr
->svr_max_offset_to_sync
[txgoff
] - offset
);
567 DTRACE_PROBE4(remove__free__inflight
,
570 uint64_t, inflight_size
,
574 * We copy data in order of increasing offset.
575 * Therefore the max_offset_to_sync[] must increase
576 * (or be zero, indicating that nothing is being
577 * copied in that txg).
579 if (svr
->svr_max_offset_to_sync
[txgoff
] != 0) {
580 ASSERT3U(svr
->svr_max_offset_to_sync
[txgoff
],
583 svr
->svr_max_offset_to_sync
[txgoff
];
587 * We've already committed to copying this segment:
588 * we have allocated space elsewhere in the pool for
589 * it and have an IO outstanding to copy the data. We
590 * cannot free the space before the copy has
591 * completed, or else the copy IO might overwrite any
592 * new data. To free that space, we record the
593 * segment in the appropriate svr_frees tree and free
594 * the mapped space later, in the txg where we have
595 * completed the copy and synced the mapping (see
596 * vdev_mapping_sync).
598 range_tree_add(svr
->svr_frees
[txgoff
],
599 offset
, inflight_size
);
600 size
-= inflight_size
;
601 offset
+= inflight_size
;
604 * This space is already accounted for as being
605 * done, because it is being copied in txg+i.
606 * However, if i!=0, then it is being copied in
607 * a future txg. If we crash after this txg
608 * syncs but before txg+i syncs, then the space
609 * will be free. Therefore we must account
610 * for the space being done in *this* txg
611 * (when it is freed) rather than the future txg
612 * (when it will be copied).
614 ASSERT3U(svr
->svr_bytes_done
[txgoff
], >=,
616 svr
->svr_bytes_done
[txgoff
] -= inflight_size
;
617 svr
->svr_bytes_done
[txg
& TXG_MASK
] += inflight_size
;
620 ASSERT0(svr
->svr_max_offset_to_sync
[TXG_CLEAN(txg
) & TXG_MASK
]);
624 * The copy thread has not yet visited this offset. Ensure
628 DTRACE_PROBE3(remove__free__unvisited
,
633 if (svr
->svr_allocd_segs
!= NULL
)
634 range_tree_clear(svr
->svr_allocd_segs
, offset
, size
);
637 * Since we now do not need to copy this data, for
638 * accounting purposes we have done our job and can count
641 svr
->svr_bytes_done
[txg
& TXG_MASK
] += size
;
643 mutex_exit(&svr
->svr_lock
);
646 * Now that we have dropped svr_lock, process the synced portion
649 if (synced_size
> 0) {
650 vdev_indirect_mark_obsolete(vd
, synced_offset
, synced_size
);
653 * Note: this can only be called from syncing context,
654 * and the vdev_indirect_mapping is only changed from the
655 * sync thread, so we don't need svr_lock while doing
656 * metaslab_free_impl_cb.
658 boolean_t checkpoint
= B_FALSE
;
659 vdev_indirect_ops
.vdev_op_remap(vd
, synced_offset
, synced_size
,
660 metaslab_free_impl_cb
, &checkpoint
);
/*
 * Stop an active removal and update the spa_removing phys.
 */
668 spa_finish_removal(spa_t
*spa
, dsl_scan_state_t state
, dmu_tx_t
*tx
)
670 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
671 ASSERT3U(dmu_tx_get_txg(tx
), ==, spa_syncing_txg(spa
));
673 /* Ensure the removal thread has completed before we free the svr. */
674 spa_vdev_remove_suspend(spa
);
676 ASSERT(state
== DSS_FINISHED
|| state
== DSS_CANCELED
);
678 if (state
== DSS_FINISHED
) {
679 spa_removing_phys_t
*srp
= &spa
->spa_removing_phys
;
680 vdev_t
*vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
681 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
683 if (srp
->sr_prev_indirect_vdev
!= -1) {
685 pvd
= vdev_lookup_top(spa
,
686 srp
->sr_prev_indirect_vdev
);
687 ASSERT3P(pvd
->vdev_ops
, ==, &vdev_indirect_ops
);
690 vic
->vic_prev_indirect_vdev
= srp
->sr_prev_indirect_vdev
;
691 srp
->sr_prev_indirect_vdev
= vd
->vdev_id
;
693 spa
->spa_removing_phys
.sr_state
= state
;
694 spa
->spa_removing_phys
.sr_end_time
= gethrestime_sec();
696 spa
->spa_vdev_removal
= NULL
;
697 spa_vdev_removal_destroy(svr
);
699 spa_sync_removing_state(spa
, tx
);
701 vdev_config_dirty(spa
->spa_root_vdev
);
705 free_mapped_segment_cb(void *arg
, uint64_t offset
, uint64_t size
)
708 vdev_indirect_mark_obsolete(vd
, offset
, size
);
709 boolean_t checkpoint
= B_FALSE
;
710 vdev_indirect_ops
.vdev_op_remap(vd
, offset
, size
,
711 metaslab_free_impl_cb
, &checkpoint
);
/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
720 vdev_mapping_sync(void *arg
, dmu_tx_t
*tx
)
722 spa_vdev_removal_t
*svr
= arg
;
723 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
724 vdev_t
*vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
725 ASSERTV(vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
);
726 uint64_t txg
= dmu_tx_get_txg(tx
);
727 vdev_indirect_mapping_t
*vim
= vd
->vdev_indirect_mapping
;
729 ASSERT(vic
->vic_mapping_object
!= 0);
730 ASSERT3U(txg
, ==, spa_syncing_txg(spa
));
732 vdev_indirect_mapping_add_entries(vim
,
733 &svr
->svr_new_segments
[txg
& TXG_MASK
], tx
);
734 vdev_indirect_births_add_entry(vd
->vdev_indirect_births
,
735 vdev_indirect_mapping_max_offset(vim
), dmu_tx_get_txg(tx
), tx
);
738 * Free the copied data for anything that was freed while the
739 * mapping entries were in flight.
741 mutex_enter(&svr
->svr_lock
);
742 range_tree_vacate(svr
->svr_frees
[txg
& TXG_MASK
],
743 free_mapped_segment_cb
, vd
);
744 ASSERT3U(svr
->svr_max_offset_to_sync
[txg
& TXG_MASK
], >=,
745 vdev_indirect_mapping_max_offset(vim
));
746 svr
->svr_max_offset_to_sync
[txg
& TXG_MASK
] = 0;
747 mutex_exit(&svr
->svr_lock
);
749 spa_sync_removing_state(spa
, tx
);
752 typedef struct vdev_copy_segment_arg
{
754 dva_t
*vcsa_dest_dva
;
756 range_tree_t
*vcsa_obsolete_segs
;
757 } vdev_copy_segment_arg_t
;
760 unalloc_seg(void *arg
, uint64_t start
, uint64_t size
)
762 vdev_copy_segment_arg_t
*vcsa
= arg
;
763 spa_t
*spa
= vcsa
->vcsa_spa
;
764 blkptr_t bp
= { { { {0} } } };
766 BP_SET_BIRTH(&bp
, TXG_INITIAL
, TXG_INITIAL
);
767 BP_SET_LSIZE(&bp
, size
);
768 BP_SET_PSIZE(&bp
, size
);
769 BP_SET_COMPRESS(&bp
, ZIO_COMPRESS_OFF
);
770 BP_SET_CHECKSUM(&bp
, ZIO_CHECKSUM_OFF
);
771 BP_SET_TYPE(&bp
, DMU_OT_NONE
);
772 BP_SET_LEVEL(&bp
, 0);
773 BP_SET_DEDUP(&bp
, 0);
774 BP_SET_BYTEORDER(&bp
, ZFS_HOST_BYTEORDER
);
776 DVA_SET_VDEV(&bp
.blk_dva
[0], DVA_GET_VDEV(vcsa
->vcsa_dest_dva
));
777 DVA_SET_OFFSET(&bp
.blk_dva
[0],
778 DVA_GET_OFFSET(vcsa
->vcsa_dest_dva
) + start
);
779 DVA_SET_ASIZE(&bp
.blk_dva
[0], size
);
781 zio_free(spa
, vcsa
->vcsa_txg
, &bp
);
/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
 */
789 spa_vdev_copy_segment_done(zio_t
*zio
)
791 vdev_copy_segment_arg_t
*vcsa
= zio
->io_private
;
793 range_tree_vacate(vcsa
->vcsa_obsolete_segs
,
795 range_tree_destroy(vcsa
->vcsa_obsolete_segs
);
796 kmem_free(vcsa
, sizeof (*vcsa
));
798 spa_config_exit(zio
->io_spa
, SCL_STATE
, zio
->io_spa
);
/*
 * The write of the new location is done.
 */
805 spa_vdev_copy_segment_write_done(zio_t
*zio
)
807 vdev_copy_arg_t
*vca
= zio
->io_private
;
809 abd_free(zio
->io_abd
);
811 mutex_enter(&vca
->vca_lock
);
812 vca
->vca_outstanding_bytes
-= zio
->io_size
;
814 if (zio
->io_error
!= 0)
815 vca
->vca_write_error_bytes
+= zio
->io_size
;
817 cv_signal(&vca
->vca_cv
);
818 mutex_exit(&vca
->vca_lock
);
/*
 * The read of the old location is done.  The parent zio is the write to
 * the new location.  Allow it to start.
 */
826 spa_vdev_copy_segment_read_done(zio_t
*zio
)
828 vdev_copy_arg_t
*vca
= zio
->io_private
;
830 if (zio
->io_error
!= 0) {
831 mutex_enter(&vca
->vca_lock
);
832 vca
->vca_read_error_bytes
+= zio
->io_size
;
833 mutex_exit(&vca
->vca_lock
);
836 zio_nowait(zio_unique_parent(zio
));
/*
 * If the old and new vdevs are mirrors, we will read both sides of the old
 * mirror, and write each copy to the corresponding side of the new mirror.
 * If the old and new vdevs have a different number of children, we will do
 * this as best as possible.  Since we aren't verifying checksums, this
 * ensures that as long as there's a good copy of the data, we'll have a
 * good copy after the removal, even if there's silent damage to one side
 * of the mirror. If we're removing a mirror that has some silent damage,
 * we'll have exactly the same damage in the new location (assuming that
 * the new location is also a mirror).
 *
 * We accomplish this by creating a tree of zio_t's, with as many writes as
 * there are "children" of the new vdev (a non-redundant vdev counts as one
 * child, a 2-way mirror has 2 children, etc). Each write has an associated
 * read from a child of the old vdev.  Typically there will be the same
 * number of children of the old and new vdevs.  However, if there are more
 * children of the new vdev, some child(ren) of the old vdev will be issued
 * multiple reads.  If there are more children of the old vdev, some copies
 * will be dropped.
 *
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 *    write(new vdev, child 0)      write(new vdev, child 1)
 *      |                             |
 *    read(old vdev, child 0)       read(old vdev, child 1)
 *
 * Child zio's complete before their parents complete.  However, zio's
 * created with zio_vdev_child_io() may be issued before their children
 * complete.  In this case we need to make sure that the children (reads)
 * complete before the parents (writes) are *issued*.  We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
 *
 * The spa_config_lock must be held while zio's created by
 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
 * zio is needed to release the spa_config_lock after all the reads and
 * writes complete. (Note that we can't grab the config lock for each read,
 * because it is not reentrant - we could deadlock with a thread waiting
 * for a write lock.)
 */
883 spa_vdev_copy_one_child(vdev_copy_arg_t
*vca
, zio_t
*nzio
,
884 vdev_t
*source_vd
, uint64_t source_offset
,
885 vdev_t
*dest_child_vd
, uint64_t dest_offset
, int dest_id
, uint64_t size
)
887 ASSERT3U(spa_config_held(nzio
->io_spa
, SCL_ALL
, RW_READER
), !=, 0);
890 * If the destination child in unwritable then there is no point
891 * in issuing the source reads which cannot be written.
893 if (!vdev_writeable(dest_child_vd
))
896 mutex_enter(&vca
->vca_lock
);
897 vca
->vca_outstanding_bytes
+= size
;
898 mutex_exit(&vca
->vca_lock
);
900 abd_t
*abd
= abd_alloc_for_io(size
, B_FALSE
);
902 vdev_t
*source_child_vd
= NULL
;
903 if (source_vd
->vdev_ops
== &vdev_mirror_ops
&& dest_id
!= -1) {
905 * Source and dest are both mirrors. Copy from the same
906 * child id as we are copying to (wrapping around if there
907 * are more dest children than source children). If the
908 * preferred source child is unreadable select another.
910 for (int i
= 0; i
< source_vd
->vdev_children
; i
++) {
911 source_child_vd
= source_vd
->vdev_child
[
912 (dest_id
+ i
) % source_vd
->vdev_children
];
913 if (vdev_readable(source_child_vd
))
917 source_child_vd
= source_vd
;
921 * There should always be at least one readable source child or
922 * the pool would be in a suspended state. Somehow selecting an
923 * unreadable child would result in IO errors, the removal process
924 * being cancelled, and the pool reverting to its pre-removal state.
926 ASSERT3P(source_child_vd
, !=, NULL
);
928 zio_t
*write_zio
= zio_vdev_child_io(nzio
, NULL
,
929 dest_child_vd
, dest_offset
, abd
, size
,
930 ZIO_TYPE_WRITE
, ZIO_PRIORITY_REMOVAL
,
932 spa_vdev_copy_segment_write_done
, vca
);
934 zio_nowait(zio_vdev_child_io(write_zio
, NULL
,
935 source_child_vd
, source_offset
, abd
, size
,
936 ZIO_TYPE_READ
, ZIO_PRIORITY_REMOVAL
,
938 spa_vdev_copy_segment_read_done
, vca
));
/*
 * Allocate a new location for this segment, and create the zio_t's to
 * read from the old location and write to the new location.
 */
946 spa_vdev_copy_segment(vdev_t
*vd
, range_tree_t
*segs
,
947 uint64_t maxalloc
, uint64_t txg
,
948 vdev_copy_arg_t
*vca
, zio_alloc_list_t
*zal
)
950 metaslab_group_t
*mg
= vd
->vdev_mg
;
951 spa_t
*spa
= vd
->vdev_spa
;
952 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
953 vdev_indirect_mapping_entry_t
*entry
;
955 uint64_t start
= range_tree_min(segs
);
956 ASSERT0(P2PHASE(start
, 1 << spa
->spa_min_ashift
));
958 ASSERT3U(maxalloc
, <=, SPA_MAXBLOCKSIZE
);
959 ASSERT0(P2PHASE(maxalloc
, 1 << spa
->spa_min_ashift
));
961 uint64_t size
= range_tree_span(segs
);
962 if (range_tree_span(segs
) > maxalloc
) {
964 * We can't allocate all the segments. Prefer to end
965 * the allocation at the end of a segment, thus avoiding
966 * additional split blocks.
970 search
.rs_start
= start
+ maxalloc
;
971 search
.rs_end
= search
.rs_start
;
972 range_seg_t
*rs
= avl_find(&segs
->rt_root
, &search
, &where
);
974 rs
= avl_nearest(&segs
->rt_root
, where
, AVL_BEFORE
);
976 rs
= AVL_PREV(&segs
->rt_root
, rs
);
979 size
= rs
->rs_end
- start
;
982 * There are no segments that end before maxalloc.
983 * I.e. the first segment is larger than maxalloc,
984 * so we must split it.
989 ASSERT3U(size
, <=, maxalloc
);
990 ASSERT0(P2PHASE(size
, 1 << spa
->spa_min_ashift
));
993 * An allocation class might not have any remaining vdevs or space
995 metaslab_class_t
*mc
= mg
->mg_class
;
996 if (mc
!= spa_normal_class(spa
) && mc
->mc_groups
<= 1)
997 mc
= spa_normal_class(spa
);
998 int error
= metaslab_alloc_dva(spa
, mc
, size
, &dst
, 0, NULL
, txg
, 0,
1000 if (error
== ENOSPC
&& mc
!= spa_normal_class(spa
)) {
1001 error
= metaslab_alloc_dva(spa
, spa_normal_class(spa
), size
,
1002 &dst
, 0, NULL
, txg
, 0, zal
, 0);
1008 * Determine the ranges that are not actually needed. Offsets are
1009 * relative to the start of the range to be copied (i.e. relative to the
1010 * local variable "start").
1012 range_tree_t
*obsolete_segs
= range_tree_create(NULL
, NULL
);
1014 range_seg_t
*rs
= avl_first(&segs
->rt_root
);
1015 ASSERT3U(rs
->rs_start
, ==, start
);
1016 uint64_t prev_seg_end
= rs
->rs_end
;
1017 while ((rs
= AVL_NEXT(&segs
->rt_root
, rs
)) != NULL
) {
1018 if (rs
->rs_start
>= start
+ size
) {
1021 range_tree_add(obsolete_segs
,
1022 prev_seg_end
- start
,
1023 rs
->rs_start
- prev_seg_end
);
1025 prev_seg_end
= rs
->rs_end
;
1027 /* We don't end in the middle of an obsolete range */
1028 ASSERT3U(start
+ size
, <=, prev_seg_end
);
1030 range_tree_clear(segs
, start
, size
);
1033 * We can't have any padding of the allocated size, otherwise we will
1034 * misunderstand what's allocated, and the size of the mapping. We
1035 * prevent padding by ensuring that all devices in the pool have the
1036 * same ashift, and the allocation size is a multiple of the ashift.
1038 VERIFY3U(DVA_GET_ASIZE(&dst
), ==, size
);
1040 entry
= kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t
), KM_SLEEP
);
1041 DVA_MAPPING_SET_SRC_OFFSET(&entry
->vime_mapping
, start
);
1042 entry
->vime_mapping
.vimep_dst
= dst
;
1043 if (spa_feature_is_enabled(spa
, SPA_FEATURE_OBSOLETE_COUNTS
)) {
1044 entry
->vime_obsolete_count
= range_tree_space(obsolete_segs
);
1047 vdev_copy_segment_arg_t
*vcsa
= kmem_zalloc(sizeof (*vcsa
), KM_SLEEP
);
1048 vcsa
->vcsa_dest_dva
= &entry
->vime_mapping
.vimep_dst
;
1049 vcsa
->vcsa_obsolete_segs
= obsolete_segs
;
1050 vcsa
->vcsa_spa
= spa
;
1051 vcsa
->vcsa_txg
= txg
;
1054 * See comment before spa_vdev_copy_one_child().
1056 spa_config_enter(spa
, SCL_STATE
, spa
, RW_READER
);
1057 zio_t
*nzio
= zio_null(spa
->spa_txg_zio
[txg
& TXG_MASK
], spa
, NULL
,
1058 spa_vdev_copy_segment_done
, vcsa
, 0);
1059 vdev_t
*dest_vd
= vdev_lookup_top(spa
, DVA_GET_VDEV(&dst
));
1060 if (dest_vd
->vdev_ops
== &vdev_mirror_ops
) {
1061 for (int i
= 0; i
< dest_vd
->vdev_children
; i
++) {
1062 vdev_t
*child
= dest_vd
->vdev_child
[i
];
1063 spa_vdev_copy_one_child(vca
, nzio
, vd
, start
,
1064 child
, DVA_GET_OFFSET(&dst
), i
, size
);
1067 spa_vdev_copy_one_child(vca
, nzio
, vd
, start
,
1068 dest_vd
, DVA_GET_OFFSET(&dst
), -1, size
);
1072 list_insert_tail(&svr
->svr_new_segments
[txg
& TXG_MASK
], entry
);
1073 ASSERT3U(start
+ size
, <=, vd
->vdev_ms_count
<< vd
->vdev_ms_shift
);
1074 vdev_dirty(vd
, 0, NULL
, txg
);
/*
 * Complete the removal of a toplevel vdev.  This is called as a
 * synctask in the same txg that we will sync out the new config (to the
 * MOS object) which indicates that this vdev is indirect.
 */
1085 vdev_remove_complete_sync(void *arg
, dmu_tx_t
*tx
)
1087 spa_vdev_removal_t
*svr
= arg
;
1088 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1089 vdev_t
*vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
1091 ASSERT3P(vd
->vdev_ops
, ==, &vdev_indirect_ops
);
1093 for (int i
= 0; i
< TXG_SIZE
; i
++) {
1094 ASSERT0(svr
->svr_bytes_done
[i
]);
1097 ASSERT3U(spa
->spa_removing_phys
.sr_copied
, ==,
1098 spa
->spa_removing_phys
.sr_to_copy
);
1100 vdev_destroy_spacemaps(vd
, tx
);
1102 /* destroy leaf zaps, if any */
1103 ASSERT3P(svr
->svr_zaplist
, !=, NULL
);
1104 for (nvpair_t
*pair
= nvlist_next_nvpair(svr
->svr_zaplist
, NULL
);
1106 pair
= nvlist_next_nvpair(svr
->svr_zaplist
, pair
)) {
1107 vdev_destroy_unlink_zap(vd
, fnvpair_value_uint64(pair
), tx
);
1109 fnvlist_free(svr
->svr_zaplist
);
1111 spa_finish_removal(dmu_tx_pool(tx
)->dp_spa
, DSS_FINISHED
, tx
);
1112 /* vd->vdev_path is not available here */
1113 spa_history_log_internal(spa
, "vdev remove completed", tx
,
1114 "%s vdev %llu", spa_name(spa
), (u_longlong_t
)vd
->vdev_id
);
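
/*
 * Add vd's leaf ZAP object (and, recursively, those of its children) to
 * zlist so that they can be unlinked by the sync context thread.
 */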
static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
	ASSERT3P(zlist, !=, NULL);
	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);

	if (vd->vdev_leaf_zap != 0) {
		char zkey[32];
		(void) snprintf(zkey, sizeof (zkey), "%s-%llu",
		    VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
		fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
	}

	for (uint64_t id = 0; id < vd->vdev_children; id++) {
		vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
	}
}
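
/*
 * Swap the removed vdev out of the tree, replace it with an indirect
 * vdev, and hand the final cleanup off to vdev_remove_complete_sync()
 * in syncing context.
 */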
static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
	vdev_t *ivd;
	dmu_tx_t *tx;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	/*
	 * First, build a list of leaf zaps to be destroyed.
	 * This is passed to the sync context thread,
	 * which does the actual unlinking.
	 */
	svr->svr_zaplist = fnvlist_alloc();
	vdev_remove_enlist_zaps(vd, svr->svr_zaplist);

	ivd = vdev_add_parent(vd, &vdev_indirect_ops);
	ivd->vdev_removing = 0;

	vd->vdev_leaf_zap = 0;

	vdev_remove_child(ivd, vd);
	vdev_compact_children(ivd);

	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	mutex_enter(&svr->svr_lock);
	svr->svr_thread = NULL;
	cv_broadcast(&svr->svr_cv);
	mutex_exit(&svr->svr_lock);

	/* After this, we can not use svr. */
	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
	    0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);
}
/*
 * Complete the removal of a toplevel vdev.  This is called in open
 * context by the removal thread after we have copied all vdev's data.
 */
1178 vdev_remove_complete(spa_t
*spa
)
1183 * Wait for any deferred frees to be synced before we call
1184 * vdev_metaslab_fini()
1186 txg_wait_synced(spa
->spa_dsl_pool
, 0);
1187 txg
= spa_vdev_enter(spa
);
1188 vdev_t
*vd
= vdev_lookup_top(spa
, spa
->spa_vdev_removal
->svr_vdev_id
);
1189 ASSERT3P(vd
->vdev_initialize_thread
, ==, NULL
);
1190 ASSERT3P(vd
->vdev_trim_thread
, ==, NULL
);
1191 ASSERT3P(vd
->vdev_autotrim_thread
, ==, NULL
);
1193 sysevent_t
*ev
= spa_event_create(spa
, vd
, NULL
,
1194 ESC_ZFS_VDEV_REMOVE_DEV
);
1196 zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
1200 * Discard allocation state.
1202 if (vd
->vdev_mg
!= NULL
) {
1203 vdev_metaslab_fini(vd
);
1204 metaslab_group_destroy(vd
->vdev_mg
);
1206 spa_log_sm_set_blocklimit(spa
);
1208 ASSERT0(vd
->vdev_stat
.vs_space
);
1209 ASSERT0(vd
->vdev_stat
.vs_dspace
);
1211 vdev_remove_replace_with_indirect(vd
, txg
);
1214 * We now release the locks, allowing spa_sync to run and finish the
1215 * removal via vdev_remove_complete_sync in syncing context.
1217 * Note that we hold on to the vdev_t that has been replaced. Since
1218 * it isn't part of the vdev tree any longer, it can't be concurrently
1219 * manipulated, even while we don't have the config lock.
1221 (void) spa_vdev_exit(spa
, NULL
, txg
, 0);
1224 * Top ZAP should have been transferred to the indirect vdev in
1225 * vdev_remove_replace_with_indirect.
1227 ASSERT0(vd
->vdev_top_zap
);
1230 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
1232 ASSERT0(vd
->vdev_leaf_zap
);
1234 txg
= spa_vdev_enter(spa
);
1235 (void) vdev_label_init(vd
, 0, VDEV_LABEL_REMOVE
);
1237 * Request to update the config and the config cachefile.
1239 vdev_config_dirty(spa
->spa_root_vdev
);
1240 (void) spa_vdev_exit(spa
, vd
, txg
, 0);
/*
 * Evacuates a segment of size at most max_alloc from the vdev
 * via repeated calls to spa_vdev_copy_segment.  If an allocation
 * fails, the pool is probably too fragmented to handle such a
 * large size, so decrease max_alloc so that the caller will not try
 * this size again this txg.
 */
1254 spa_vdev_copy_impl(vdev_t
*vd
, spa_vdev_removal_t
*svr
, vdev_copy_arg_t
*vca
,
1255 uint64_t *max_alloc
, dmu_tx_t
*tx
)
1257 uint64_t txg
= dmu_tx_get_txg(tx
);
1258 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1260 mutex_enter(&svr
->svr_lock
);
1263 * Determine how big of a chunk to copy. We can allocate up
1264 * to max_alloc bytes, and we can span up to vdev_removal_max_span
1265 * bytes of unallocated space at a time. "segs" will track the
1266 * allocated segments that we are copying. We may also be copying
1267 * free segments (of up to vdev_removal_max_span bytes).
1269 range_tree_t
*segs
= range_tree_create(NULL
, NULL
);
1271 range_seg_t
*rs
= range_tree_first(svr
->svr_allocd_segs
);
1276 uint64_t seg_length
;
1278 if (range_tree_is_empty(segs
)) {
1279 /* need to truncate the first seg based on max_alloc */
1281 MIN(rs
->rs_end
- rs
->rs_start
, *max_alloc
);
1283 if (rs
->rs_start
- range_tree_max(segs
) >
1284 vdev_removal_max_span
) {
1286 * Including this segment would cause us to
1287 * copy a larger unneeded chunk than is allowed.
1290 } else if (rs
->rs_end
- range_tree_min(segs
) >
1293 * This additional segment would extend past
1294 * max_alloc. Rather than splitting this
1295 * segment, leave it for the next mapping.
1299 seg_length
= rs
->rs_end
- rs
->rs_start
;
1303 range_tree_add(segs
, rs
->rs_start
, seg_length
);
1304 range_tree_remove(svr
->svr_allocd_segs
,
1305 rs
->rs_start
, seg_length
);
1308 if (range_tree_is_empty(segs
)) {
1309 mutex_exit(&svr
->svr_lock
);
1310 range_tree_destroy(segs
);
1314 if (svr
->svr_max_offset_to_sync
[txg
& TXG_MASK
] == 0) {
1315 dsl_sync_task_nowait(dmu_tx_pool(tx
), vdev_mapping_sync
,
1316 svr
, 0, ZFS_SPACE_CHECK_NONE
, tx
);
1319 svr
->svr_max_offset_to_sync
[txg
& TXG_MASK
] = range_tree_max(segs
);
1322 * Note: this is the amount of *allocated* space
1323 * that we are taking care of each txg.
1325 svr
->svr_bytes_done
[txg
& TXG_MASK
] += range_tree_space(segs
);
1327 mutex_exit(&svr
->svr_lock
);
1329 zio_alloc_list_t zal
;
1330 metaslab_trace_init(&zal
);
1331 uint64_t thismax
= SPA_MAXBLOCKSIZE
;
1332 while (!range_tree_is_empty(segs
)) {
1333 int error
= spa_vdev_copy_segment(vd
,
1334 segs
, thismax
, txg
, vca
, &zal
);
1336 if (error
== ENOSPC
) {
1338 * Cut our segment in half, and don't try this
1339 * segment size again this txg. Note that the
1340 * allocation size must be aligned to the highest
1341 * ashift in the pool, so that the allocation will
1342 * not be padded out to a multiple of the ashift,
1343 * which could cause us to think that this mapping
1344 * is larger than we intended.
1346 ASSERT3U(spa
->spa_max_ashift
, >=, SPA_MINBLOCKSHIFT
);
1347 ASSERT3U(spa
->spa_max_ashift
, ==, spa
->spa_min_ashift
);
1348 uint64_t attempted
=
1349 MIN(range_tree_span(segs
), thismax
);
1350 thismax
= P2ROUNDUP(attempted
/ 2,
1351 1 << spa
->spa_max_ashift
);
1353 * The minimum-size allocation can not fail.
1355 ASSERT3U(attempted
, >, 1 << spa
->spa_max_ashift
);
1356 *max_alloc
= attempted
- (1 << spa
->spa_max_ashift
);
1361 * We've performed an allocation, so reset the
1364 metaslab_trace_fini(&zal
);
1365 metaslab_trace_init(&zal
);
1368 metaslab_trace_fini(&zal
);
1369 range_tree_destroy(segs
);
/*
 * The size of each removal mapping is limited by the tunable
 * zfs_remove_max_segment, but we must adjust this to be a multiple of the
 * pool's ashift, so that we don't try to split individual sectors regardless
 * of the tunable value.  (Note that device removal requires that all devices
 * have the same ashift, so there's no difference between spa_min_ashift and
 * spa_max_ashift.)  The raw tunable should not be used elsewhere.
 */
1381 spa_remove_max_segment(spa_t
*spa
)
1383 return (P2ROUNDUP(zfs_remove_max_segment
, 1 << spa
->spa_max_ashift
));
/*
 * The removal thread operates in open context.  It iterates over all
 * allocated space in the vdev, by loading each metaslab's spacemap.
 * For each contiguous segment of allocated space (capping the segment
 * size at SPA_MAXBLOCKSIZE), we:
 *    - Allocate space for it on another vdev.
 *    - Create a new mapping from the old location to the new location
 *      (as a record in svr_new_segments).
 *    - Initiate a physical read zio to get the data off the removing disk.
 *    - In the read zio's done callback, initiate a physical write zio to
 *      write it to the new vdev.
 * Note that all of this will take effect when a particular TXG syncs.
 * The sync thread ensures that all the phys reads and writes for the syncing
 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
 * (see vdev_mapping_sync()).
 */
1403 spa_vdev_remove_thread(void *arg
)
1406 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
1407 vdev_copy_arg_t vca
;
1408 uint64_t max_alloc
= spa_remove_max_segment(spa
);
1409 uint64_t last_txg
= 0;
1411 spa_config_enter(spa
, SCL_CONFIG
, FTAG
, RW_READER
);
1412 vdev_t
*vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
1413 vdev_indirect_mapping_t
*vim
= vd
->vdev_indirect_mapping
;
1414 uint64_t start_offset
= vdev_indirect_mapping_max_offset(vim
);
1416 ASSERT3P(vd
->vdev_ops
, !=, &vdev_indirect_ops
);
1417 ASSERT(vdev_is_concrete(vd
));
1418 ASSERT(vd
->vdev_removing
);
1419 ASSERT(vd
->vdev_indirect_config
.vic_mapping_object
!= 0);
1420 ASSERT(vim
!= NULL
);
1422 mutex_init(&vca
.vca_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1423 cv_init(&vca
.vca_cv
, NULL
, CV_DEFAULT
, NULL
);
1424 vca
.vca_outstanding_bytes
= 0;
1425 vca
.vca_read_error_bytes
= 0;
1426 vca
.vca_write_error_bytes
= 0;
1428 mutex_enter(&svr
->svr_lock
);
1431 * Start from vim_max_offset so we pick up where we left off
1432 * if we are restarting the removal after opening the pool.
1435 for (msi
= start_offset
>> vd
->vdev_ms_shift
;
1436 msi
< vd
->vdev_ms_count
&& !svr
->svr_thread_exit
; msi
++) {
1437 metaslab_t
*msp
= vd
->vdev_ms
[msi
];
1438 ASSERT3U(msi
, <=, vd
->vdev_ms_count
);
1440 ASSERT0(range_tree_space(svr
->svr_allocd_segs
));
1442 mutex_enter(&msp
->ms_sync_lock
);
1443 mutex_enter(&msp
->ms_lock
);
1446 * Assert nothing in flight -- ms_*tree is empty.
1448 for (int i
= 0; i
< TXG_SIZE
; i
++) {
1449 ASSERT0(range_tree_space(msp
->ms_allocating
[i
]));
1453 * If the metaslab has ever been allocated from (ms_sm!=NULL),
1454 * read the allocated segments from the space map object
1455 * into svr_allocd_segs. Since we do this while holding
1456 * svr_lock and ms_sync_lock, concurrent frees (which
1457 * would have modified the space map) will wait for us
1458 * to finish loading the spacemap, and then take the
1459 * appropriate action (see free_from_removing_vdev()).
1461 if (msp
->ms_sm
!= NULL
) {
1462 VERIFY0(space_map_load(msp
->ms_sm
,
1463 svr
->svr_allocd_segs
, SM_ALLOC
));
1465 range_tree_walk(msp
->ms_unflushed_allocs
,
1466 range_tree_add
, svr
->svr_allocd_segs
);
1467 range_tree_walk(msp
->ms_unflushed_frees
,
1468 range_tree_remove
, svr
->svr_allocd_segs
);
1469 range_tree_walk(msp
->ms_freeing
,
1470 range_tree_remove
, svr
->svr_allocd_segs
);
1473 * When we are resuming from a paused removal (i.e.
1474 * when importing a pool with a removal in progress),
1475 * discard any state that we have already processed.
1477 range_tree_clear(svr
->svr_allocd_segs
, 0, start_offset
);
1479 mutex_exit(&msp
->ms_lock
);
1480 mutex_exit(&msp
->ms_sync_lock
);
1483 zfs_dbgmsg("copying %llu segments for metaslab %llu",
1484 avl_numnodes(&svr
->svr_allocd_segs
->rt_root
),
1487 while (!svr
->svr_thread_exit
&&
1488 !range_tree_is_empty(svr
->svr_allocd_segs
)) {
1490 mutex_exit(&svr
->svr_lock
);
1493 * We need to periodically drop the config lock so that
1494 * writers can get in. Additionally, we can't wait
1495 * for a txg to sync while holding a config lock
1496 * (since a waiting writer could cause a 3-way deadlock
1497 * with the sync thread, which also gets a config
1498 * lock for reader). So we can't hold the config lock
1499 * while calling dmu_tx_assign().
1501 spa_config_exit(spa
, SCL_CONFIG
, FTAG
);
1504 * This delay will pause the removal around the point
1505 * specified by zfs_removal_suspend_progress. We do this
1506 * solely from the test suite or during debugging.
1508 uint64_t bytes_copied
=
1509 spa
->spa_removing_phys
.sr_copied
;
1510 for (int i
= 0; i
< TXG_SIZE
; i
++)
1511 bytes_copied
+= svr
->svr_bytes_done
[i
];
1512 while (zfs_removal_suspend_progress
&&
1513 !svr
->svr_thread_exit
)
1516 mutex_enter(&vca
.vca_lock
);
1517 while (vca
.vca_outstanding_bytes
>
1518 zfs_remove_max_copy_bytes
) {
1519 cv_wait(&vca
.vca_cv
, &vca
.vca_lock
);
1521 mutex_exit(&vca
.vca_lock
);
1524 dmu_tx_create_dd(spa_get_dsl(spa
)->dp_mos_dir
);
1526 VERIFY0(dmu_tx_assign(tx
, TXG_WAIT
));
1527 uint64_t txg
= dmu_tx_get_txg(tx
);
1530 * Reacquire the vdev_config lock. The vdev_t
1531 * that we're removing may have changed, e.g. due
1532 * to a vdev_attach or vdev_detach.
1534 spa_config_enter(spa
, SCL_CONFIG
, FTAG
, RW_READER
);
1535 vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
1537 if (txg
!= last_txg
)
1538 max_alloc
= spa_remove_max_segment(spa
);
1541 spa_vdev_copy_impl(vd
, svr
, &vca
, &max_alloc
, tx
);
1544 mutex_enter(&svr
->svr_lock
);
1547 mutex_enter(&vca
.vca_lock
);
1548 if (zfs_removal_ignore_errors
== 0 &&
1549 (vca
.vca_read_error_bytes
> 0 ||
1550 vca
.vca_write_error_bytes
> 0)) {
1551 svr
->svr_thread_exit
= B_TRUE
;
1553 mutex_exit(&vca
.vca_lock
);
1556 mutex_exit(&svr
->svr_lock
);
1558 spa_config_exit(spa
, SCL_CONFIG
, FTAG
);
1561 * Wait for all copies to finish before cleaning up the vca.
1563 txg_wait_synced(spa
->spa_dsl_pool
, 0);
1564 ASSERT0(vca
.vca_outstanding_bytes
);
1566 mutex_destroy(&vca
.vca_lock
);
1567 cv_destroy(&vca
.vca_cv
);
1569 if (svr
->svr_thread_exit
) {
1570 mutex_enter(&svr
->svr_lock
);
1571 range_tree_vacate(svr
->svr_allocd_segs
, NULL
, NULL
);
1572 svr
->svr_thread
= NULL
;
1573 cv_broadcast(&svr
->svr_cv
);
1574 mutex_exit(&svr
->svr_lock
);
1577 * During the removal process an unrecoverable read or write
1578 * error was encountered. The removal process must be
1579 * cancelled or this damage may become permanent.
1581 if (zfs_removal_ignore_errors
== 0 &&
1582 (vca
.vca_read_error_bytes
> 0 ||
1583 vca
.vca_write_error_bytes
> 0)) {
1584 zfs_dbgmsg("canceling removal due to IO errors: "
1585 "[read_error_bytes=%llu] [write_error_bytes=%llu]",
1586 vca
.vca_read_error_bytes
,
1587 vca
.vca_write_error_bytes
);
1588 spa_vdev_remove_cancel_impl(spa
);
1591 ASSERT0(range_tree_space(svr
->svr_allocd_segs
));
1592 vdev_remove_complete(spa
);
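
/*
 * Ask the removal thread to stop and wait for it to exit.
 */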
void
spa_vdev_remove_suspend(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	mutex_enter(&svr->svr_lock);
	svr->svr_thread_exit = B_TRUE;
	while (svr->svr_thread != NULL)
		cv_wait(&svr->svr_cv, &svr->svr_lock);
	svr->svr_thread_exit = B_FALSE;
	mutex_exit(&svr->svr_lock);
}
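
/*
 * dsl_sync_task check function: fail with ENOTACTIVE if there is no
 * removal in progress.
 */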
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);
	return (0);
}
/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer being removing.
 */
1629 spa_vdev_remove_cancel_sync(void *arg
, dmu_tx_t
*tx
)
1631 spa_t
*spa
= dmu_tx_pool(tx
)->dp_spa
;
1632 spa_vdev_removal_t
*svr
= spa
->spa_vdev_removal
;
1633 vdev_t
*vd
= vdev_lookup_top(spa
, svr
->svr_vdev_id
);
1634 vdev_indirect_config_t
*vic
= &vd
->vdev_indirect_config
;
1635 vdev_indirect_mapping_t
*vim
= vd
->vdev_indirect_mapping
;
1636 objset_t
*mos
= spa
->spa_meta_objset
;
1638 ASSERT3P(svr
->svr_thread
, ==, NULL
);
1640 spa_feature_decr(spa
, SPA_FEATURE_DEVICE_REMOVAL
, tx
);
1642 boolean_t are_precise
;
1643 VERIFY0(vdev_obsolete_counts_are_precise(vd
, &are_precise
));
1645 spa_feature_decr(spa
, SPA_FEATURE_OBSOLETE_COUNTS
, tx
);
1646 VERIFY0(zap_remove(spa
->spa_meta_objset
, vd
->vdev_top_zap
,
1647 VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE
, tx
));
1650 uint64_t obsolete_sm_object
;
1651 VERIFY0(vdev_obsolete_sm_object(vd
, &obsolete_sm_object
));
1652 if (obsolete_sm_object
!= 0) {
1653 ASSERT(vd
->vdev_obsolete_sm
!= NULL
);
1654 ASSERT3U(obsolete_sm_object
, ==,
1655 space_map_object(vd
->vdev_obsolete_sm
));
1657 space_map_free(vd
->vdev_obsolete_sm
, tx
);
1658 VERIFY0(zap_remove(spa
->spa_meta_objset
, vd
->vdev_top_zap
,
1659 VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM
, tx
));
1660 space_map_close(vd
->vdev_obsolete_sm
);
1661 vd
->vdev_obsolete_sm
= NULL
;
1662 spa_feature_decr(spa
, SPA_FEATURE_OBSOLETE_COUNTS
, tx
);
1664 for (int i
= 0; i
< TXG_SIZE
; i
++) {
1665 ASSERT(list_is_empty(&svr
->svr_new_segments
[i
]));
1666 ASSERT3U(svr
->svr_max_offset_to_sync
[i
], <=,
1667 vdev_indirect_mapping_max_offset(vim
));
1670 for (uint64_t msi
= 0; msi
< vd
->vdev_ms_count
; msi
++) {
1671 metaslab_t
*msp
= vd
->vdev_ms
[msi
];
1673 if (msp
->ms_start
>= vdev_indirect_mapping_max_offset(vim
))
1676 ASSERT0(range_tree_space(svr
->svr_allocd_segs
));
1678 mutex_enter(&msp
->ms_lock
);
1681 * Assert nothing in flight -- ms_*tree is empty.
1683 for (int i
= 0; i
< TXG_SIZE
; i
++)
1684 ASSERT0(range_tree_space(msp
->ms_allocating
[i
]));
1685 for (int i
= 0; i
< TXG_DEFER_SIZE
; i
++)
1686 ASSERT0(range_tree_space(msp
->ms_defer
[i
]));
1687 ASSERT0(range_tree_space(msp
->ms_freed
));
1689 if (msp
->ms_sm
!= NULL
) {
1690 mutex_enter(&svr
->svr_lock
);
1691 VERIFY0(space_map_load(msp
->ms_sm
,
1692 svr
->svr_allocd_segs
, SM_ALLOC
));
1694 range_tree_walk(msp
->ms_unflushed_allocs
,
1695 range_tree_add
, svr
->svr_allocd_segs
);
1696 range_tree_walk(msp
->ms_unflushed_frees
,
1697 range_tree_remove
, svr
->svr_allocd_segs
);
1698 range_tree_walk(msp
->ms_freeing
,
1699 range_tree_remove
, svr
->svr_allocd_segs
);
1702 * Clear everything past what has been synced,
1703 * because we have not allocated mappings for it yet.
1705 uint64_t syncd
= vdev_indirect_mapping_max_offset(vim
);
1706 uint64_t sm_end
= msp
->ms_sm
->sm_start
+
1707 msp
->ms_sm
->sm_size
;
1709 range_tree_clear(svr
->svr_allocd_segs
,
1710 syncd
, sm_end
- syncd
);
1712 mutex_exit(&svr
->svr_lock
);
1714 mutex_exit(&msp
->ms_lock
);
1716 mutex_enter(&svr
->svr_lock
);
1717 range_tree_vacate(svr
->svr_allocd_segs
,
1718 free_mapped_segment_cb
, vd
);
1719 mutex_exit(&svr
->svr_lock
);
1723 * Note: this must happen after we invoke free_mapped_segment_cb,
1724 * because it adds to the obsolete_segments.
1726 range_tree_vacate(vd
->vdev_obsolete_segments
, NULL
, NULL
);
1728 ASSERT3U(vic
->vic_mapping_object
, ==,
1729 vdev_indirect_mapping_object(vd
->vdev_indirect_mapping
));
1730 vdev_indirect_mapping_close(vd
->vdev_indirect_mapping
);
1731 vd
->vdev_indirect_mapping
= NULL
;
1732 vdev_indirect_mapping_free(mos
, vic
->vic_mapping_object
, tx
);
1733 vic
->vic_mapping_object
= 0;
1735 ASSERT3U(vic
->vic_births_object
, ==,
1736 vdev_indirect_births_object(vd
->vdev_indirect_births
));
1737 vdev_indirect_births_close(vd
->vdev_indirect_births
);
1738 vd
->vdev_indirect_births
= NULL
;
1739 vdev_indirect_births_free(mos
, vic
->vic_births_object
, tx
);
1740 vic
->vic_births_object
= 0;
1743 * We may have processed some frees from the removing vdev in this
1744 * txg, thus increasing svr_bytes_done; discard that here to
1745 * satisfy the assertions in spa_vdev_removal_destroy().
1746 * Note that future txg's can not have any bytes_done, because
1747 * future TXG's are only modified from open context, and we have
1748 * already shut down the copying thread.
1750 svr
->svr_bytes_done
[dmu_tx_get_txg(tx
) & TXG_MASK
] = 0;
1751 spa_finish_removal(spa
, DSS_CANCELED
, tx
);
1753 vd
->vdev_removing
= B_FALSE
;
1754 vdev_config_dirty(vd
);
1756 zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
1757 vd
->vdev_id
, dmu_tx_get_txg(tx
));
1758 spa_history_log_internal(spa
, "vdev remove canceled", tx
,
1759 "%s vdev %llu %s", spa_name(spa
),
1760 (u_longlong_t
)vd
->vdev_id
,
1761 (vd
->vdev_path
!= NULL
) ? vd
->vdev_path
: "-");
1765 spa_vdev_remove_cancel_impl(spa_t
*spa
)
1767 uint64_t vdid
= spa
->spa_vdev_removal
->svr_vdev_id
;
1769 int error
= dsl_sync_task(spa
->spa_name
, spa_vdev_remove_cancel_check
,
1770 spa_vdev_remove_cancel_sync
, NULL
, 0,
1771 ZFS_SPACE_CHECK_EXTRA_RESERVED
);
1774 spa_config_enter(spa
, SCL_ALLOC
| SCL_VDEV
, FTAG
, RW_WRITER
);
1775 vdev_t
*vd
= vdev_lookup_top(spa
, vdid
);
1776 metaslab_group_activate(vd
->vdev_mg
);
1777 spa_config_exit(spa
, SCL_ALLOC
| SCL_VDEV
, FTAG
);
1784 spa_vdev_remove_cancel(spa_t
*spa
)
1786 spa_vdev_remove_suspend(spa
);
1788 if (spa
->spa_vdev_removal
== NULL
)
1789 return (ENOTACTIVE
);
1791 return (spa_vdev_remove_cancel_impl(spa
));
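
/*
 * Called during spa sync to push this txg's removal progress
 * (svr_bytes_done) into spa_removing_phys and persist it.
 */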

void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	if (svr == NULL)
		return;

	/*
	 * This check is necessary so that we do not dirty the
	 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
	 * is nothing to do. Dirtying it every time would prevent us
	 * from syncing-to-convergence.
	 */
	if (svr->svr_bytes_done[txgoff] == 0)
		return;

	/*
	 * Update progress accounting.
	 */
	spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
	svr->svr_bytes_done[txgoff] = 0;

	spa_sync_removing_state(spa, tx);
}
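
/*
 * Sketch of the call site (illustrative): the spa sync path invokes this
 * once per sync pass, roughly as "svr_sync(spa, tx)". The early return when
 * svr_bytes_done[txgoff] is zero keeps an idle pass from re-dirtying the
 * DIRECTORY_OBJECT, which is what allows spa_sync() to converge.
 */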

static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
	uint64_t id = vd->vdev_id;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	vdev_free(vd);

	vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
	vdev_add_child(rvd, vd);
	vdev_config_dirty(rvd);

	/*
	 * Reassess the health of our root vdev.
	 */
	vdev_reopen(rvd);
}
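
/*
 * Illustrative sketch (assumed, not from the original code): after
 * vdev_remove_make_hole_and_free() the removed vdev's slot in the root
 * vdev's child array is occupied by a hole vdev, so existing vdev ids
 * remain stable:
 *
 *	vdev_t *hvd = vdev_lookup_top(spa, id);
 *	ASSERT3P(hvd->vdev_ops, ==, &vdev_hole_ops);
 */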

/*
 * Remove a log device. The config lock is held for the specified TXG.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	ASSERT(vd->vdev_islog);
	ASSERT(vd == vd->vdev_top);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Stop allocating from this vdev.
	 */
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
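
	/*
	 * Worked example (assuming the stock constants TXG_CONCURRENT_STATES
	 * == 3 and TXG_DEFER_SIZE == 2): if *txg is 100, this waits until
	 * txg 105 has synced, so every allocation or free issued before the
	 * passivation above, including deferred frees, is on disk before we
	 * proceed.
	 */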

	/*
	 * Evacuate the device. We don't hold the config lock as
	 * writer since we need to do I/O but we do keep the
	 * spa_namespace_lock held. Once this completes the device
	 * should no longer have any blocks allocated on it.
	 */
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (vd->vdev_stat.vs_alloc != 0)
		error = spa_reset_logs(spa);

	*txg = spa_vdev_config_enter(spa);

	if (error != 0) {
		metaslab_group_activate(mg);
		return (error);
	}
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * The evacuation succeeded. Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);

	/*
	 * When the log space map feature is enabled we look at
	 * the vdev's top_zap to find the on-disk flush data of
	 * the metaslab we just flushed. Thus, while removing a
	 * log vdev we make sure to call vdev_metaslab_fini()
	 * first, which removes all metaslabs of this vdev from
	 * spa_metaslabs_by_flushed before vdev_remove_empty()
	 * destroys the top_zap of this log vdev.
	 *
	 * This avoids the scenario where we flush a metaslab
	 * from the log vdev being removed that doesn't have a
	 * top_zap and end up failing to lookup its on-disk flush
	 * data.
	 *
	 * We don't call metaslab_group_destroy() right away
	 * though (it will be called in vdev_free() later) as
	 * during metaslab_sync() of metaslabs from other vdevs
	 * we may touch the metaslab group of this vdev through
	 * metaslab_class_histogram_verify()
	 */
	vdev_metaslab_fini(vd);
	spa_log_sm_set_blocklimit(spa);

	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);

	/* Stop initializing and TRIM */
	vdev_initialize_stop_all(vd, VDEV_INITIALIZE_CANCELED);
	vdev_trim_stop_all(vd, VDEV_TRIM_CANCELED);
	vdev_autotrim_stop_wait(vd);

	*txg = spa_vdev_config_enter(spa);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/* The top ZAP should have been destroyed by vdev_remove_empty. */
	ASSERT0(vd->vdev_top_zap);
	/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
	ASSERT0(vd->vdev_leaf_zap);

	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	if (list_link_active(&vd->vdev_state_dirty_node))
		vdev_state_clean(vd);
	if (list_link_active(&vd->vdev_config_dirty_node))
		vdev_config_clean(vd);

	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * Clean up the vdev namespace.
	 */
	vdev_remove_make_hole_and_free(vd);

	if (ev != NULL)
		spa_event_post(ev);

	return (0);
}

static int
spa_vdev_remove_top_check(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd != vd->vdev_top)
		return (SET_ERROR(ENOTSUP));

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return (SET_ERROR(ENOTSUP));

	/* available space in the pool's normal class */
	uint64_t available = dsl_dir_space_available(
	    spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);

	metaslab_class_t *mc = vd->vdev_mg->mg_class;

	/*
	 * When removing a vdev from an allocation class that has
	 * remaining vdevs, include available space from the class.
	 */
	if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
		uint64_t class_avail = metaslab_class_get_space(mc) -
		    metaslab_class_get_alloc(mc);

		/* add class space, adjusted for overhead */
		available += (class_avail * 94) / 100;
	}
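
	/*
	 * Worked example: if the allocation class has 100 GiB of unallocated
	 * space, only 94 GiB of it is credited toward the check below; the
	 * 6% haircut is a conservative allowance for allocation overhead
	 * within that class.
	 */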

	/*
	 * There has to be enough free space to remove the
	 * device and leave double the "slop" space (i.e. we
	 * must leave at least 3% of the pool free, in addition to
	 * the normal slop space).
	 */
	if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
		return (SET_ERROR(ENOSPC));
	}
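
	/*
	 * Worked example (assuming the default spa_slop_shift of 5): on a
	 * 1 TiB pool, spa_get_slop_space() is roughly 32 GiB, so removing a
	 * top-level vdev whose vs_dspace is 200 GiB requires roughly 232 GiB
	 * of available space to pass this check.
	 */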

	/*
	 * There can not be a removal in progress.
	 */
	if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	/*
	 * The device must have all its data.
	 */
	if (!vdev_dtl_empty(vd, DTL_MISSING) ||
	    !vdev_dtl_empty(vd, DTL_OUTAGE))
		return (SET_ERROR(EBUSY));

	/*
	 * The device must be healthy.
	 */
	if (!vdev_readable(vd))
		return (SET_ERROR(EIO));

	/*
	 * All vdevs in normal class must have the same ashift.
	 */
	if (spa->spa_max_ashift != spa->spa_min_ashift) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * All vdevs in normal class must have the same ashift
	 * and not be raidz.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	int num_indirect = 0;
	for (uint64_t id = 0; id < rvd->vdev_children; id++) {
		vdev_t *cvd = rvd->vdev_child[id];
		if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
			ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
		if (cvd->vdev_ops == &vdev_indirect_ops)
			num_indirect++;
		if (!vdev_is_concrete(cvd))
			continue;
		if (cvd->vdev_ops == &vdev_raidz_ops)
			return (SET_ERROR(EINVAL));
		/*
		 * Need the mirror to be mirror of leaf vdevs only
		 */
		if (cvd->vdev_ops == &vdev_mirror_ops) {
			for (uint64_t cid = 0;
			    cid < cvd->vdev_children; cid++) {
				if (!cvd->vdev_child[cid]->vdev_ops->
				    vdev_op_leaf)
					return (SET_ERROR(EINVAL));
			}
		}
	}

	return (0);
}

/*
 * Initiate removal of a top-level vdev, reducing the total space in the pool.
 * The config lock is held for the specified TXG. Once initiated,
 * evacuation of all allocated space (copying it to other vdevs) happens
 * in the background (see spa_vdev_remove_thread()), and can be canceled
 * (see spa_vdev_remove_cancel()). If successful, the vdev will
 * be transformed to an indirect vdev (see spa_vdev_remove_complete()).
 */
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
	spa_t *spa = vd->vdev_spa;
	int error;

	/*
	 * Check for errors up-front, so that we don't waste time
	 * passivating the metaslab group and clearing the ZIL if there
	 * are errors.
	 */
	error = spa_vdev_remove_top_check(vd);
	if (error != 0)
		return (error);

	/*
	 * Stop allocating from this vdev. Note that we must check
	 * that this is not the only device in the pool before
	 * passivating, otherwise we will not be able to make
	 * progress because we can't allocate from any vdevs.
	 * The above check for sufficient free space serves this
	 * purpose.
	 */
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * We must ensure that no "stubby" log blocks are allocated
	 * on the device to be removed. These blocks could be
	 * written at any time, including while we are in the middle
	 * of copying them.
	 */
	error = spa_reset_logs(spa);

	/*
	 * We stop any initializing and TRIM that is currently in progress
	 * but leave the state as "active". This will allow the process to
	 * resume if the removal is canceled sometime later.
	 */
	vdev_initialize_stop_all(vd, VDEV_INITIALIZE_ACTIVE);
	vdev_trim_stop_all(vd, VDEV_TRIM_ACTIVE);
	vdev_autotrim_stop_wait(vd);

	*txg = spa_vdev_config_enter(spa);

	/*
	 * Things might have changed while the config lock was dropped
	 * (e.g. space usage). Check for errors again.
	 */
	if (error == 0)
		error = spa_vdev_remove_top_check(vd);

	if (error != 0) {
		metaslab_group_activate(mg);
		spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
		spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
		spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
		return (error);
	}

	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time. As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock. During each step the configuration is synced out.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0, error_log;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
	sysevent_t *ev = NULL;
	char *vd_type = NULL, *vd_path = NULL, *vd_path_log = NULL;

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

		if (!locked)
			return (spa_vdev_exit(spa, NULL, txg, error));

		return (error);
	}

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			if (vd == NULL)
				vd = spa_lookup_by_guid(spa, guid, B_TRUE);
			ev = spa_event_create(spa, vd, NULL,
			    ESC_ZFS_VDEV_REMOVE_AUX);

			vd_type = VDEV_TYPE_SPARE;
			vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		vd_type = VDEV_TYPE_L2CACHE;
		vd_path = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		/*
		 * Cache devices can always be removed.
		 */
		vd = spa_lookup_by_guid(spa, guid, B_TRUE);
		ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		vd_type = VDEV_TYPE_LOG;
		vd_path = (vd->vdev_path != NULL) ? vd->vdev_path : "-";
		error = spa_vdev_remove_log(vd, &txg);
	} else if (vd != NULL) {
		ASSERT(!locked);
		error = spa_vdev_remove_top(vd, &txg);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = SET_ERROR(ENOENT);
	}

	if (vd_path != NULL)
		vd_path_log = spa_strdup(vd_path);

	error_log = error;

	if (!locked)
		error = spa_vdev_exit(spa, NULL, txg, error);

	/*
	 * Logging must be done outside the spa config lock. Otherwise,
	 * this code path could end up holding the spa config lock while
	 * waiting for a txg_sync so it can write to the internal log.
	 * Doing that would prevent the txg sync from actually happening,
	 * causing a deadlock.
	 */
	if (error_log == 0 && vd_type != NULL && vd_path_log != NULL) {
		spa_history_log_internal(spa, "vdev remove", NULL,
		    "%s vdev (%s) %s", spa_name(spa), vd_type, vd_path_log);
	}
	if (vd_path_log != NULL)
		spa_strfree(vd_path_log);

	if (ev != NULL)
		spa_event_post(ev);

	return (error);
}
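
/*
 * Illustrative usage (not part of the original code): this is the common
 * entry point behind "zpool remove"; a caller that has already resolved the
 * device's guid would invoke it roughly as
 *
 *	error = spa_vdev_remove(spa, guid, B_FALSE);
 *
 * passing B_TRUE for unspare only when an in-use hot spare should be
 * removed anyway.
 */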

int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
	prs->prs_state = spa->spa_removing_phys.sr_state;

	if (prs->prs_state == DSS_NONE)
		return (SET_ERROR(ENOENT));

	prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
	prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
	prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
	prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
	prs->prs_copied = spa->spa_removing_phys.sr_copied;

	prs->prs_mapping_memory = 0;
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != -1) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
		vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}

	return (0);
}
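
/*
 * Illustrative note: consumers typically derive progress from these
 * counters, e.g. percent complete is prs_copied * 100 / prs_to_copy, while
 * prs_mapping_memory reports the in-core size of the indirect mappings of
 * all previously removed vdevs on the sr_prev_indirect_vdev chain.
 */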

ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_ignore_errors, INT, ZMOD_RW,
	"Ignore hard IO errors when removing device");

ZFS_MODULE_PARAM(zfs_vdev, zfs_, remove_max_segment, INT, ZMOD_RW,
	"Largest contiguous segment to allocate when removing device");

ZFS_MODULE_PARAM(zfs_vdev, vdev_, removal_max_span, INT, ZMOD_RW,
	"Largest span of free chunks a remap segment can span");

ZFS_MODULE_PARAM(zfs_vdev, zfs_, removal_suspend_progress, INT, ZMOD_RW,
	"Pause device removal after this many bytes are copied "
	"(debug use only - causes removal to hang)");

EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);
EXPORT_SYMBOL(spa_restart_removal);
EXPORT_SYMBOL(spa_vdev_removal_destroy);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_remove_cancel);
EXPORT_SYMBOL(spa_vdev_remove_suspend);
EXPORT_SYMBOL(svr_sync);