/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_dir.h>
#include <sys/arc.h>
#include <sys/zfeature.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/abd.h>
#include <sys/trace_vdev.h>

/*
 * This file contains the necessary logic to remove vdevs from a
 * storage pool. Currently, the only devices that can be removed
 * are log, cache, and spare devices; and top level vdevs from a pool
 * w/o raidz or mirrors. (Note that members of a mirror can be removed
 * by the detach operation.)
 *
 * Log vdevs are removed by evacuating them and then turning the vdev
 * into a hole vdev while holding spa config locks.
 *
 * Top level vdevs are removed and converted into an indirect vdev via
 * a multi-step process:
 *
 *  - Disable allocations from this device (spa_vdev_remove_top).
 *
 *  - From a new thread (spa_vdev_remove_thread), copy data from
 *    the removing vdev to a different vdev. The copy happens in open
 *    context (spa_vdev_copy_impl) and issues a sync task
 *    (vdev_mapping_sync) so the sync thread can update the partial
 *    indirect mappings in core and on disk.
 *
 *  - If a free happens during a removal, it is freed from the
 *    removing vdev, and if it has already been copied, from the new
 *    location as well (free_from_removing_vdev).
 *
 *  - After the removal is completed, the copy thread converts the vdev
 *    into an indirect vdev (vdev_remove_complete) before instructing
 *    the sync thread to destroy the space maps and finish the removal
 *    (spa_finish_removal).
 */

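/*
 * A sketch of the call flow described above (our annotation, not
 * authoritative; it assumes only the entry points named in this file
 * and omits intermediate callers):
 *
 *	spa_vdev_remove_top()            open context; dispatches ->
 *	    vdev_remove_initiate_sync()  sync task: creates the svr and
 *	                                 starts the removal thread
 *	spa_vdev_remove_thread()         open context
 *	    spa_vdev_copy_impl()         issues the read/write zios
 *	        vdev_mapping_sync()      sync task, each txg with progress
 *	    vdev_remove_complete()       open context; dispatches ->
 *	        vdev_remove_complete_sync() -> spa_finish_removal()
 */
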
typedef struct vdev_copy_arg {
	metaslab_t	*vca_msp;
	uint64_t	vca_outstanding_bytes;
	kcondvar_t	vca_cv;
	kmutex_t	vca_lock;
} vdev_copy_arg_t;

/*
 * The maximum amount of memory we can use for outstanding i/o while
 * doing a device removal. This determines how much i/o we can have
 * in flight concurrently.
 */
int zfs_remove_max_copy_bytes = 64 * 1024 * 1024;

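/*
 * Illustrative sketch (our annotation, not compiled): the removal
 * thread stays under this limit by tracking in-flight bytes in
 * vdev_copy_arg_t and sleeping on its condition variable, roughly:
 *
 *	mutex_enter(&vca->vca_lock);
 *	while (vca->vca_outstanding_bytes > zfs_remove_max_copy_bytes)
 *		cv_wait(&vca->vca_cv, &vca->vca_lock);
 *	mutex_exit(&vca->vca_lock);
 *
 * The write-done callback (spa_vdev_copy_segment_write_done) decrements
 * vca_outstanding_bytes and signals vca_cv as copies complete.
 */
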
/*
 * The largest contiguous segment that we will attempt to allocate when
 * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
 * there is a performance problem with attempting to allocate large blocks,
 * consider decreasing this.
 */
int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;

/*
 * Allow a remap segment to span free chunks of at most this size. The main
 * impact of a larger span is that we will read and write larger, more
 * contiguous chunks, with more "unnecessary" data -- trading off bandwidth
 * for iops. The value here was chosen to align with
 * zfs_vdev_read_gap_limit, which is a similar concept when doing regular
 * reads (but there's no reason it has to be the same).
 *
 * Additionally, a higher span will have the following relatively minor
 * effects:
 *  - the mapping will be smaller, since one entry can cover more allocated
 *    segments
 *  - more of the fragmentation in the removing device will be preserved
 *  - we'll do larger allocations, which may fail and fall back on smaller
 *    allocations
 */
int vdev_removal_max_span = 32 * 1024;

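/*
 * Worked example (our annotation, illustrative numbers): with segments
 * [0, 16K) and [40K, 56K) allocated and the 24K gap between them free,
 * the gap is smaller than vdev_removal_max_span, so both segments can
 * be covered by a single mapping entry spanning all 56K. The copy then
 * reads and writes the full 56K in one pass, and the 24K of
 * "unnecessary" data in the gap is tracked as obsolete (see the
 * obsolete_segs logic in spa_vdev_copy_segment()) and freed from the
 * new location once the copy completes.
 */
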
/*
 * This is used by the test suite so that it can ensure that certain
 * actions happen while in the middle of a removal.
 */
unsigned long zfs_remove_max_bytes_pause = -1UL;

#define	VDEV_REMOVAL_ZAP_OBJS	"lzap"

static void spa_vdev_remove_thread(void *arg);

static void
spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
{
	VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys, tx));
}

static nvlist_t *
spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
{
	for (int i = 0; i < count; i++) {
		uint64_t guid =
		    fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);

		if (guid == target_guid)
			return (nvpp[i]);
	}

	return (NULL);
}

static void
spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
    nvlist_t *dev_to_remove)
{
	nvlist_t **newdev = NULL;

	if (count > 1)
		newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);

	for (int i = 0, j = 0; i < count; i++) {
		if (dev[i] == dev_to_remove)
			continue;
		VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
	}

	VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
	VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);

	for (int i = 0; i < count - 1; i++)
		nvlist_free(newdev[i]);

	if (count > 1)
		kmem_free(newdev, (count - 1) * sizeof (void *));
}

static spa_vdev_removal_t *
spa_vdev_removal_create(vdev_t *vd)
{
	spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
	mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
	svr->svr_allocd_segs = range_tree_create(NULL, NULL);
	svr->svr_vdev_id = vd->vdev_id;

	for (int i = 0; i < TXG_SIZE; i++) {
		svr->svr_frees[i] = range_tree_create(NULL, NULL);
		list_create(&svr->svr_new_segments[i],
		    sizeof (vdev_indirect_mapping_entry_t),
		    offsetof(vdev_indirect_mapping_entry_t, vime_node));
	}

	return (svr);
}

void
spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
{
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
		ASSERT0(svr->svr_max_offset_to_sync[i]);
		range_tree_destroy(svr->svr_frees[i]);
		list_destroy(&svr->svr_new_segments[i]);
	}

	range_tree_destroy(svr->svr_allocd_segs);
	mutex_destroy(&svr->svr_lock);
	cv_destroy(&svr->svr_cv);
	kmem_free(svr, sizeof (*svr));
}

/*
 * This is called as a synctask in the txg in which we will mark this vdev
 * as removing (in the config stored in the MOS).
 *
 * It begins the evacuation of a toplevel vdev by:
 * - initializing the spa_removing_phys which tracks this removal
 * - computing the amount of space to remove for accounting purposes
 * - dirtying all dbufs in the spa_config_object
 * - creating the spa_vdev_removal
 * - starting the spa_vdev_remove_thread
 */
static void
vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
{
	int vdev_id = (uintptr_t)arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
	spa_vdev_removal_t *svr = NULL;
	ASSERTV(uint64_t txg = dmu_tx_get_txg(tx));

	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
	svr = spa_vdev_removal_create(vd);

	ASSERT(vd->vdev_removing);
	ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);

	spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		/*
		 * By activating the OBSOLETE_COUNTS feature, we prevent
		 * the pool from being downgraded and ensure that the
		 * refcounts are precise.
		 */
		spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		uint64_t one = 1;
		VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
		    &one, tx));
		ASSERTV(boolean_t are_precise);
		ASSERT0(vdev_obsolete_counts_are_precise(vd, &are_precise));
		ASSERT3B(are_precise, ==, B_TRUE);
	}

	vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
	vd->vdev_indirect_mapping =
	    vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
	vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
	vd->vdev_indirect_births =
	    vdev_indirect_births_open(mos, vic->vic_births_object);
	spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
	spa->spa_removing_phys.sr_start_time = gethrestime_sec();
	spa->spa_removing_phys.sr_end_time = 0;
	spa->spa_removing_phys.sr_state = DSS_SCANNING;
	spa->spa_removing_phys.sr_to_copy = 0;
	spa->spa_removing_phys.sr_copied = 0;

	/*
	 * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
	 * there may be space in the defer tree, which is free, but still
	 * counted in vs_alloc.
	 */
	for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
		metaslab_t *ms = vd->vdev_ms[i];
		if (ms->ms_sm == NULL)
			continue;

		/*
		 * Sync tasks happen before metaslab_sync(), therefore
		 * smp_alloc and sm_alloc must be the same.
		 */
		ASSERT3U(space_map_allocated(ms->ms_sm), ==,
		    ms->ms_sm->sm_phys->smp_alloc);

		spa->spa_removing_phys.sr_to_copy +=
		    space_map_allocated(ms->ms_sm);

		/*
		 * Space which we are freeing this txg does not need to
		 * be copied.
		 */
		spa->spa_removing_phys.sr_to_copy -=
		    range_tree_space(ms->ms_freeing);

		ASSERT0(range_tree_space(ms->ms_freed));
		for (int t = 0; t < TXG_SIZE; t++)
			ASSERT0(range_tree_space(ms->ms_allocating[t]));
	}

	/*
	 * Sync tasks are called before metaslab_sync(), so there should
	 * be no already-synced metaslabs in the TXG_CLEAN list.
	 */
	ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);

	spa_sync_removing_state(spa, tx);

	/*
	 * All blocks that we need to read the most recent mapping must be
	 * stored on concrete vdevs. Therefore, we must dirty anything that
	 * is read before spa_remove_init(). Specifically, the
	 * spa_config_object. (Note that although we already modified the
	 * spa_config_object in spa_sync_removing_state, that may not have
	 * modified all blocks of the object.)
	 */
	dmu_object_info_t doi;
	VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
	for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
		dmu_buf_t *dbuf;
		VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
		    offset, FTAG, &dbuf, 0));
		dmu_buf_will_dirty(dbuf, tx);
		offset += dbuf->db_size;
		dmu_buf_rele(dbuf, FTAG);
	}

	/*
	 * Now that we've allocated the im_object, dirty the vdev to ensure
	 * that the object gets written to the config on disk.
	 */
	vdev_config_dirty(vd);

	zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
	    "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
	    vic->vic_mapping_object);

	spa_history_log_internal(spa, "vdev remove started", tx,
	    "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");
	/*
	 * Setting spa_vdev_removal causes subsequent frees to call
	 * free_from_removing_vdev(). Note that we don't need any locking
	 * because we are the sync thread, and metaslab_free_impl() is only
	 * called from syncing context (potentially from a zio taskq thread,
	 * but in any case only when there are outstanding free i/os, which
	 * there are not).
	 */
	ASSERT3P(spa->spa_vdev_removal, ==, NULL);
	spa->spa_vdev_removal = svr;
	svr->svr_thread = thread_create(NULL, 0,
	    spa_vdev_remove_thread, spa, 0, &p0, TS_RUN, minclsyspri);
}

/*
 * When we are opening a pool, we must read the mapping for each
 * indirect vdev in order from most recently removed to least
 * recently removed. We do this because the blocks for the mapping
 * of older indirect vdevs may be stored on more recently removed vdevs.
 * In order to read each indirect mapping object, we must have
 * initialized all more recently removed vdevs.
 */
int
spa_remove_init(spa_t *spa)
{
	int error;

	error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
	    DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_REMOVING, sizeof (uint64_t),
	    sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
	    &spa->spa_removing_phys);

	if (error == ENOENT) {
		spa->spa_removing_phys.sr_state = DSS_NONE;
		spa->spa_removing_phys.sr_removing_vdev = -1;
		spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
		spa->spa_indirect_vdevs_loaded = B_TRUE;
		return (0);
	} else if (error != 0) {
		return (error);
	}

	if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
		/*
		 * We are currently removing a vdev. Create and
		 * initialize a spa_vdev_removal_t from the bonus
		 * buffer of the removing vdev's vdev_im_object, and
		 * initialize its partial mapping.
		 */
		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		vdev_t *vd = vdev_lookup_top(spa,
		    spa->spa_removing_phys.sr_removing_vdev);

		if (vd == NULL) {
			spa_config_exit(spa, SCL_STATE, FTAG);
			return (EINVAL);
		}

		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT(vdev_is_concrete(vd));
		spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
		ASSERT3U(svr->svr_vdev_id, ==, vd->vdev_id);
		ASSERT(vd->vdev_removing);

		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);
		spa_config_exit(spa, SCL_STATE, FTAG);

		spa->spa_vdev_removal = svr;
	}

	spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != UINT64_MAX) {
		vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
		    spa->spa_meta_objset, vic->vic_mapping_object);
		vd->vdev_indirect_births = vdev_indirect_births_open(
		    spa->spa_meta_objset, vic->vic_births_object);

		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}
	spa_config_exit(spa, SCL_STATE, FTAG);

	/*
	 * Now that we've loaded all the indirect mappings, we can allow
	 * reads from other blocks (e.g. via predictive prefetch).
	 */
	spa->spa_indirect_vdevs_loaded = B_TRUE;
	return (0);
}
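
/*
 * Example of the ordering requirement (our annotation, hypothetical
 * pool history): if vdev A was removed before vdev B, some blocks of
 * A's indirect mapping object may have been allocated on B and then
 * remapped again when B was removed. Reading A's mapping therefore
 * requires B's mapping to already be loaded, which is why the loop
 * above walks from sr_prev_indirect_vdev (the most recently removed
 * vdev) backwards via vic_prev_indirect_vdev.
 */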

void
spa_restart_removal(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	/*
	 * In general when this function is called there is no
	 * removal thread running. The only scenario where this
	 * is not true is during spa_import() where this function
	 * is called twice [once from spa_import_impl() and once
	 * from spa_async_resume()]. Thus, in the scenario where we
	 * import a pool that has an ongoing removal we don't
	 * want to spawn a second thread.
	 */
	if (svr->svr_thread != NULL)
		return;

	if (!spa_writeable(spa))
		return;

	zfs_dbgmsg("restarting removal of %llu", svr->svr_vdev_id);
	svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, spa,
	    0, &p0, TS_RUN, minclsyspri);
}

/*
 * Process freeing from a device which is in the middle of being removed.
 * We must handle this carefully so that we attempt to copy freed data,
 * and we correctly free already-copied data.
 */
void
free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size)
{
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t txg = spa_syncing_txg(spa);
	uint64_t max_offset_yet = 0;

	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vim));
	ASSERT3U(vd->vdev_id, ==, svr->svr_vdev_id);

	mutex_enter(&svr->svr_lock);

	/*
	 * Remove the segment from the removing vdev's spacemap. This
	 * ensures that we will not attempt to copy this space (if the
	 * removal thread has not yet visited it), and also ensures
	 * that we know what is actually allocated on the new vdevs
	 * (needed if we cancel the removal).
	 *
	 * Note: we must do the metaslab_free_concrete() with the svr_lock
	 * held, so that the remove_thread can not load this metaslab and then
	 * visit this offset between the time that we metaslab_free_concrete()
	 * and when we check to see if it has been visited.
	 *
	 * Note: The checkpoint flag is set to false as having/taking
	 * a checkpoint and removing a device can't happen at the same
	 * time.
	 */
	ASSERT(!spa_has_checkpoint(spa));
	metaslab_free_concrete(vd, offset, size, B_FALSE);

	uint64_t synced_size = 0;
	uint64_t synced_offset = 0;
	uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
	if (offset < max_offset_synced) {
		/*
		 * The mapping for this offset is already on disk.
		 * Free from the new location.
		 *
		 * Note that we use svr_max_synced_offset because it is
		 * updated atomically with respect to the in-core mapping.
		 * By contrast, vim_max_offset is not.
		 *
		 * This block may be split between a synced entry and an
		 * in-flight or unvisited entry. Only process the synced
		 * portion of it here.
		 */
		synced_size = MIN(size, max_offset_synced - offset);
		synced_offset = offset;

		ASSERT3U(max_offset_yet, <=, max_offset_synced);
		max_offset_yet = max_offset_synced;

		DTRACE_PROBE3(remove__free__synced,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, synced_size);

		size -= synced_size;
		offset += synced_size;
	}

	/*
	 * Look at all in-flight txgs starting from the currently syncing one
	 * and see if a section of this free is being copied. By starting from
	 * this txg and iterating forward, we might find that this region
	 * was copied in two different txgs and handle it appropriately.
	 */
	for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
		int txgoff = (txg + i) & TXG_MASK;
		if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
			/*
			 * The mapping for this offset is in flight, and
			 * will be synced in txg+i.
			 */
			uint64_t inflight_size = MIN(size,
			    svr->svr_max_offset_to_sync[txgoff] - offset);

			DTRACE_PROBE4(remove__free__inflight,
			    spa_t *, spa,
			    uint64_t, offset,
			    uint64_t, inflight_size,
			    uint64_t, txg + i);

			/*
			 * We copy data in order of increasing offset.
			 * Therefore the max_offset_to_sync[] must increase
			 * (or be zero, indicating that nothing is being
			 * copied in that txg).
			 */
			if (svr->svr_max_offset_to_sync[txgoff] != 0) {
				ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
				    >=, max_offset_yet);
				max_offset_yet =
				    svr->svr_max_offset_to_sync[txgoff];
			}

			/*
			 * We've already committed to copying this segment:
			 * we have allocated space elsewhere in the pool for
			 * it and have an IO outstanding to copy the data. We
			 * cannot free the space before the copy has
			 * completed, or else the copy IO might overwrite any
			 * new data. To free that space, we record the
			 * segment in the appropriate svr_frees tree and free
			 * the mapped space later, in the txg where we have
			 * completed the copy and synced the mapping (see
			 * vdev_mapping_sync).
			 */
			range_tree_add(svr->svr_frees[txgoff],
			    offset, inflight_size);
			size -= inflight_size;
			offset += inflight_size;

			/*
			 * This space is already accounted for as being
			 * done, because it is being copied in txg+i.
			 * However, if i!=0, then it is being copied in
			 * a future txg. If we crash after this txg
			 * syncs but before txg+i syncs, then the space
			 * will be free. Therefore we must account
			 * for the space being done in *this* txg
			 * (when it is freed) rather than the future txg
			 * (when it will be copied).
			 */
			ASSERT3U(svr->svr_bytes_done[txgoff], >=,
			    inflight_size);
			svr->svr_bytes_done[txgoff] -= inflight_size;
			svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
		}
	}
	ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);

	if (size > 0) {
		/*
		 * The copy thread has not yet visited this offset. Ensure
		 * that it doesn't.
		 */

		DTRACE_PROBE3(remove__free__unvisited,
		    spa_t *, spa,
		    uint64_t, offset,
		    uint64_t, size);

		if (svr->svr_allocd_segs != NULL)
			range_tree_clear(svr->svr_allocd_segs, offset, size);

		/*
		 * Since we now do not need to copy this data, for
		 * accounting purposes we have done our job and can count
		 * it as completed.
		 */
		svr->svr_bytes_done[txg & TXG_MASK] += size;
	}
	mutex_exit(&svr->svr_lock);

	/*
	 * Now that we have dropped svr_lock, process the synced portion
	 * of this free.
	 */
	if (synced_size > 0) {
		vdev_indirect_mark_obsolete(vd, synced_offset, synced_size);

		/*
		 * Note: this can only be called from syncing context,
		 * and the vdev_indirect_mapping is only changed from the
		 * sync thread, so we don't need svr_lock while doing
		 * metaslab_free_impl_cb.
		 */
		boolean_t checkpoint = B_FALSE;
		vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
		    metaslab_free_impl_cb, &checkpoint);
	}
}
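
/*
 * Worked example of the three cases above (our annotation, illustrative
 * offsets): suppose a free of [X, X+3M) arrives while
 * vdev_indirect_mapping_max_offset() is X+1M and the in-flight
 * svr_max_offset_to_sync[] is X+2M. Then:
 *  - [X, X+1M) is already mapped on disk and is freed from its new
 *    location immediately (the synced portion),
 *  - [X+1M, X+2M) is recorded in svr_frees[] and freed only once that
 *    txg's mapping syncs (the in-flight portion),
 *  - [X+2M, X+3M) is cleared from svr_allocd_segs so the copy thread
 *    never visits it (the unvisited portion).
 */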

/*
 * Stop an active removal and update the spa_removing phys.
 */
static void
spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));

	/* Ensure the removal thread has completed before we free the svr. */
	spa_vdev_remove_suspend(spa);

	ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);

	if (state == DSS_FINISHED) {
		spa_removing_phys_t *srp = &spa->spa_removing_phys;
		vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;

		if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
			vdev_t *pvd;
			pvd = vdev_lookup_top(spa,
			    srp->sr_prev_indirect_vdev);
			ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
		}

		vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
		srp->sr_prev_indirect_vdev = vd->vdev_id;
	}
	spa->spa_removing_phys.sr_state = state;
	spa->spa_removing_phys.sr_end_time = gethrestime_sec();

	spa->spa_vdev_removal = NULL;
	spa_vdev_removal_destroy(svr);

	spa_sync_removing_state(spa, tx);

	vdev_config_dirty(spa->spa_root_vdev);
}

static void
free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
	vdev_t *vd = arg;
	vdev_indirect_mark_obsolete(vd, offset, size);
	boolean_t checkpoint = B_FALSE;
	vdev_indirect_ops.vdev_op_remap(vd, offset, size,
	    metaslab_free_impl_cb, &checkpoint);
}

/*
 * On behalf of the removal thread, syncs an incremental bit more of
 * the indirect mapping to disk and updates the in-memory mapping.
 * Called as a sync task in every txg that the removal thread makes progress.
 */
static void
vdev_mapping_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
	uint64_t txg = dmu_tx_get_txg(tx);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

	ASSERT(vic->vic_mapping_object != 0);
	ASSERT3U(txg, ==, spa_syncing_txg(spa));

	vdev_indirect_mapping_add_entries(vim,
	    &svr->svr_new_segments[txg & TXG_MASK], tx);
	vdev_indirect_births_add_entry(vd->vdev_indirect_births,
	    vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);

	/*
	 * Free the copied data for anything that was freed while the
	 * mapping entries were in flight.
	 */
	mutex_enter(&svr->svr_lock);
	range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
	    free_mapped_segment_cb, vd);
	ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
	    vdev_indirect_mapping_max_offset(vim));
	svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
	mutex_exit(&svr->svr_lock);

	spa_sync_removing_state(spa, tx);
}

typedef struct vdev_copy_segment_arg {
	spa_t *vcsa_spa;
	dva_t *vcsa_dest_dva;
	uint64_t vcsa_txg;
	range_tree_t *vcsa_obsolete_segs;
} vdev_copy_segment_arg_t;

static void
unalloc_seg(void *arg, uint64_t start, uint64_t size)
{
	vdev_copy_segment_arg_t *vcsa = arg;
	spa_t *spa = vcsa->vcsa_spa;
	blkptr_t bp = { { { {0} } } };

	BP_SET_BIRTH(&bp, TXG_INITIAL, TXG_INITIAL);
	BP_SET_LSIZE(&bp, size);
	BP_SET_PSIZE(&bp, size);
	BP_SET_COMPRESS(&bp, ZIO_COMPRESS_OFF);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_OFF);
	BP_SET_TYPE(&bp, DMU_OT_NONE);
	BP_SET_LEVEL(&bp, 0);
	BP_SET_DEDUP(&bp, 0);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);

	DVA_SET_VDEV(&bp.blk_dva[0], DVA_GET_VDEV(vcsa->vcsa_dest_dva));
	DVA_SET_OFFSET(&bp.blk_dva[0],
	    DVA_GET_OFFSET(vcsa->vcsa_dest_dva) + start);
	DVA_SET_ASIZE(&bp.blk_dva[0], size);

	zio_free(spa, vcsa->vcsa_txg, &bp);
}

/*
 * All reads and writes associated with a call to spa_vdev_copy_segment()
 * are done.
 */
static void
spa_vdev_copy_segment_done(zio_t *zio)
{
	vdev_copy_segment_arg_t *vcsa = zio->io_private;

	range_tree_vacate(vcsa->vcsa_obsolete_segs,
	    unalloc_seg, vcsa);
	range_tree_destroy(vcsa->vcsa_obsolete_segs);
	kmem_free(vcsa, sizeof (*vcsa));

	spa_config_exit(zio->io_spa, SCL_STATE, zio->io_spa);
}

/*
 * The write of the new location is done.
 */
static void
spa_vdev_copy_segment_write_done(zio_t *zio)
{
	vdev_copy_arg_t *vca = zio->io_private;

	abd_free(zio->io_abd);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes -= zio->io_size;
	cv_signal(&vca->vca_cv);
	mutex_exit(&vca->vca_lock);
}

/*
 * The read of the old location is done. The parent zio is the write to
 * the new location. Allow it to start.
 */
static void
spa_vdev_copy_segment_read_done(zio_t *zio)
{
	zio_nowait(zio_unique_parent(zio));
}

/*
 * If the old and new vdevs are mirrors, we will read both sides of the old
 * mirror, and write each copy to the corresponding side of the new mirror.
 * If the old and new vdevs have a different number of children, we will do
 * this as best as possible. Since we aren't verifying checksums, this
 * ensures that as long as there's a good copy of the data, we'll have a
 * good copy after the removal, even if there's silent damage to one side
 * of the mirror. If we're removing a mirror that has some silent damage,
 * we'll have exactly the same damage in the new location (assuming that
 * the new location is also a mirror).
 *
 * We accomplish this by creating a tree of zio_t's, with as many writes as
 * there are "children" of the new vdev (a non-redundant vdev counts as one
 * child, a 2-way mirror has 2 children, etc). Each write has an associated
 * read from a child of the old vdev. Typically there will be the same
 * number of children of the old and new vdevs. However, if there are more
 * children of the new vdev, some child(ren) of the old vdev will be issued
 * multiple reads. If there are more children of the old vdev, some copies
 * will be dropped.
 *
 * For example, the tree of zio_t's for a 2-way mirror is:
 *
 *                            null
 *                           /    \
 *    write(new vdev, child 0)    write(new vdev, child 1)
 *      |                           |
 *    read(old vdev, child 0)     read(old vdev, child 1)
 *
 * Child zio's complete before their parents complete. However, zio's
 * created with zio_vdev_child_io() may be issued before their children
 * complete. In this case we need to make sure that the children (reads)
 * complete before the parents (writes) are *issued*. We do this by not
 * calling zio_nowait() on each write until its corresponding read has
 * completed.
 *
 * The spa_config_lock must be held while zio's created by
 * zio_vdev_child_io() are in progress, to ensure that the vdev tree does
 * not change (e.g. due to a concurrent "zpool attach/detach"). The "null"
 * zio is needed to release the spa_config_lock after all the reads and
 * writes complete. (Note that we can't grab the config lock for each read,
 * because it is not reentrant - we could deadlock with a thread waiting
 * for a write lock.)
 */
static void
spa_vdev_copy_one_child(vdev_copy_arg_t *vca, zio_t *nzio,
    vdev_t *source_vd, uint64_t source_offset,
    vdev_t *dest_child_vd, uint64_t dest_offset, int dest_id, uint64_t size)
{
	ASSERT3U(spa_config_held(nzio->io_spa, SCL_ALL, RW_READER), !=, 0);

	mutex_enter(&vca->vca_lock);
	vca->vca_outstanding_bytes += size;
	mutex_exit(&vca->vca_lock);

	abd_t *abd = abd_alloc_for_io(size, B_FALSE);

	vdev_t *source_child_vd;
	if (source_vd->vdev_ops == &vdev_mirror_ops && dest_id != -1) {
		/*
		 * Source and dest are both mirrors. Copy from the same
		 * child id as we are copying to (wrapping around if there
		 * are more dest children than source children).
		 */
		source_child_vd =
		    source_vd->vdev_child[dest_id % source_vd->vdev_children];
	} else {
		source_child_vd = source_vd;
	}

	zio_t *write_zio = zio_vdev_child_io(nzio, NULL,
	    dest_child_vd, dest_offset, abd, size,
	    ZIO_TYPE_WRITE, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_write_done, vca);

	zio_nowait(zio_vdev_child_io(write_zio, NULL,
	    source_child_vd, source_offset, abd, size,
	    ZIO_TYPE_READ, ZIO_PRIORITY_REMOVAL,
	    ZIO_FLAG_CANFAIL,
	    spa_vdev_copy_segment_read_done, vca));
}

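/*
 * Example of the child-count mismatch described above (our annotation,
 * hypothetical configuration): copying from a 2-way mirror to a 3-way
 * mirror issues three writes (dest children 0, 1 and 2) paired with
 * reads from source children 0, 1 and 0 again, since the source child
 * is picked by dest_id % source_vd->vdev_children. Copying from a
 * 3-way mirror to a 2-way mirror issues only two read/write pairs, so
 * one source copy is never read.
 */
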
/*
 * Allocate a new location for this segment, and create the zio_t's to
 * read from the old location and write to the new location.
 */
static int
spa_vdev_copy_segment(vdev_t *vd, range_tree_t *segs,
    uint64_t maxalloc, uint64_t txg,
    vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_indirect_mapping_entry_t *entry;
	dva_t dst = {{ 0 }};
	uint64_t start = range_tree_min(segs);

	ASSERT3U(maxalloc, <=, SPA_MAXBLOCKSIZE);

	uint64_t size = range_tree_span(segs);
	if (range_tree_span(segs) > maxalloc) {
		/*
		 * We can't allocate all the segments. Prefer to end
		 * the allocation at the end of a segment, thus avoiding
		 * additional split blocks.
		 */
		range_seg_t search;
		avl_index_t where;
		search.rs_start = start + maxalloc;
		search.rs_end = search.rs_start;
		range_seg_t *rs = avl_find(&segs->rt_root, &search, &where);
		if (rs == NULL) {
			rs = avl_nearest(&segs->rt_root, where, AVL_BEFORE);
		} else {
			rs = AVL_PREV(&segs->rt_root, rs);
		}
		if (rs != NULL) {
			size = rs->rs_end - start;
		} else {
			/*
			 * There are no segments that end before maxalloc.
			 * I.e. the first segment is larger than maxalloc,
			 * so we must split it.
			 */
			size = maxalloc;
		}
	}
	ASSERT3U(size, <=, maxalloc);

	/*
	 * An allocation class might not have any remaining vdevs or space
	 */
	metaslab_class_t *mc = mg->mg_class;
	if (mc != spa_normal_class(spa) && mc->mc_groups <= 1)
		mc = spa_normal_class(spa);
	int error = metaslab_alloc_dva(spa, mc, size, &dst, 0, NULL, txg, 0,
	    zal, 0);
	if (error == ENOSPC && mc != spa_normal_class(spa)) {
		error = metaslab_alloc_dva(spa, spa_normal_class(spa), size,
		    &dst, 0, NULL, txg, 0, zal, 0);
	}
	if (error != 0)
		return (error);

	/*
	 * Determine the ranges that are not actually needed. Offsets are
	 * relative to the start of the range to be copied (i.e. relative to
	 * the local variable "start").
	 */
	range_tree_t *obsolete_segs = range_tree_create(NULL, NULL);

	range_seg_t *rs = avl_first(&segs->rt_root);
	ASSERT3U(rs->rs_start, ==, start);
	uint64_t prev_seg_end = rs->rs_end;
	while ((rs = AVL_NEXT(&segs->rt_root, rs)) != NULL) {
		if (rs->rs_start >= start + size) {
			break;
		} else {
			range_tree_add(obsolete_segs,
			    prev_seg_end - start,
			    rs->rs_start - prev_seg_end);
		}
		prev_seg_end = rs->rs_end;
	}
	/* We don't end in the middle of an obsolete range */
	ASSERT3U(start + size, <=, prev_seg_end);

	range_tree_clear(segs, start, size);

	/*
	 * We can't have any padding of the allocated size, otherwise we will
	 * misunderstand what's allocated, and the size of the mapping.
	 * The caller ensures this will be true by passing in a size that is
	 * aligned to the worst (highest) ashift in the pool.
	 */
	ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);

	entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
	DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
	entry->vime_mapping.vimep_dst = dst;
	if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
		entry->vime_obsolete_count = range_tree_space(obsolete_segs);
	}

	vdev_copy_segment_arg_t *vcsa = kmem_zalloc(sizeof (*vcsa), KM_SLEEP);
	vcsa->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
	vcsa->vcsa_obsolete_segs = obsolete_segs;
	vcsa->vcsa_spa = spa;
	vcsa->vcsa_txg = txg;

	/*
	 * See comment before spa_vdev_copy_one_child().
	 */
	spa_config_enter(spa, SCL_STATE, spa, RW_READER);
	zio_t *nzio = zio_null(spa->spa_txg_zio[txg & TXG_MASK], spa, NULL,
	    spa_vdev_copy_segment_done, vcsa, 0);
	vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dst));
	if (dest_vd->vdev_ops == &vdev_mirror_ops) {
		for (int i = 0; i < dest_vd->vdev_children; i++) {
			vdev_t *child = dest_vd->vdev_child[i];
			spa_vdev_copy_one_child(vca, nzio, vd, start,
			    child, DVA_GET_OFFSET(&dst), i, size);
		}
	} else {
		spa_vdev_copy_one_child(vca, nzio, vd, start,
		    dest_vd, DVA_GET_OFFSET(&dst), -1, size);
	}
	zio_nowait(nzio);

	list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
	ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
	vdev_dirty(vd, 0, NULL, txg);

	return (0);
}
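
/*
 * Worked example of the truncation above (our annotation, illustrative
 * numbers; assumes the range tree comparator treats overlapping
 * segments as equal): with segs = { [0, 48K), [64K, 80K) } and
 * maxalloc = 72K, the avl_find()/AVL_PREV() search steps back from the
 * segment containing offset 72K, so the copy is cut at 48K instead of
 * splitting [64K, 80K). Only when the first segment itself extends
 * past maxalloc do we split it and copy exactly maxalloc bytes.
 */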

/*
 * Complete the removal of a toplevel vdev. This is called as a
 * synctask in the same txg that we will sync out the new config (to the
 * MOS object) which indicates that this vdev is indirect.
 */
static void
vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = arg;
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);

	ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);

	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT0(svr->svr_bytes_done[i]);
	}

	ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
	    spa->spa_removing_phys.sr_to_copy);

	vdev_destroy_spacemaps(vd, tx);

	/* destroy leaf zaps, if any */
	ASSERT3P(svr->svr_zaplist, !=, NULL);
	for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
	    pair != NULL;
	    pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
		vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
	}
	fnvlist_free(svr->svr_zaplist);

	spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
	/* vd->vdev_path is not available here */
	spa_history_log_internal(spa, "vdev remove completed", tx,
	    "%s vdev %llu", spa_name(spa), vd->vdev_id);
}

static void
vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
{
	ASSERT3P(zlist, !=, NULL);
	ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);

	if (vd->vdev_leaf_zap != 0) {
		char zkey[32];
		(void) snprintf(zkey, sizeof (zkey), "%s-%llu",
		    VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
		fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
	}

	for (uint64_t id = 0; id < vd->vdev_children; id++) {
		vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
	}
}

static void
vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
{
	vdev_t *ivd;
	dmu_tx_t *tx;
	spa_t *spa = vd->vdev_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	/*
	 * First, build a list of leaf zaps to be destroyed.
	 * This is passed to the sync context thread,
	 * which does the actual unlinking.
	 */
	svr->svr_zaplist = fnvlist_alloc();
	vdev_remove_enlist_zaps(vd, svr->svr_zaplist);

	ivd = vdev_add_parent(vd, &vdev_indirect_ops);
	ivd->vdev_removing = 0;

	vd->vdev_leaf_zap = 0;

	vdev_remove_child(ivd, vd);
	vdev_compact_children(ivd);

	ASSERT(!list_link_active(&vd->vdev_state_dirty_node));

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
	    0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	/*
	 * Indicate that this thread has exited.
	 * After this, we can not use svr.
	 */
	mutex_enter(&svr->svr_lock);
	svr->svr_thread = NULL;
	cv_broadcast(&svr->svr_cv);
	mutex_exit(&svr->svr_lock);
}

/*
 * Complete the removal of a toplevel vdev. This is called in open
 * context by the removal thread after we have copied all of the
 * vdev's data.
 */
static void
vdev_remove_complete(spa_t *spa)
{
	uint64_t txg;

	/*
	 * Wait for any deferred frees to be synced before we call
	 * vdev_metaslab_fini()
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	txg = spa_vdev_enter(spa);
	vdev_t *vd = vdev_lookup_top(spa, spa->spa_vdev_removal->svr_vdev_id);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);

	zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
	    vd->vdev_id, txg);

	/*
	 * Discard allocation state.
	 */
	if (vd->vdev_mg != NULL) {
		vdev_metaslab_fini(vd);
		metaslab_group_destroy(vd->vdev_mg);
		vd->vdev_mg = NULL;
	}
	ASSERT0(vd->vdev_stat.vs_space);
	ASSERT0(vd->vdev_stat.vs_dspace);

	vdev_remove_replace_with_indirect(vd, txg);

	/*
	 * We now release the locks, allowing spa_sync to run and finish the
	 * removal via vdev_remove_complete_sync in syncing context.
	 *
	 * Note that we hold on to the vdev_t that has been replaced. Since
	 * it isn't part of the vdev tree any longer, it can't be concurrently
	 * manipulated, even while we don't have the config lock.
	 */
	(void) spa_vdev_exit(spa, NULL, txg, 0);

	/*
	 * Top ZAP should have been transferred to the indirect vdev in
	 * vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_top_zap);

	/*
	 * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
	 */
	ASSERT0(vd->vdev_leaf_zap);

	txg = spa_vdev_enter(spa);
	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
	/*
	 * Request to update the config and the config cachefile.
	 */
	vdev_config_dirty(spa->spa_root_vdev);
	(void) spa_vdev_exit(spa, vd, txg, 0);

	if (ev != NULL)
		spa_event_post(ev);
}

/*
 * Evacuates a segment of size at most max_alloc from the vdev
 * via repeated calls to spa_vdev_copy_segment. If an allocation
 * fails, the pool is probably too fragmented to handle such a
 * large size, so decrease max_alloc so that the caller will not try
 * this size again this txg.
 */
static void
spa_vdev_copy_impl(vdev_t *vd, spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
    uint64_t *max_alloc, dmu_tx_t *tx)
{
	uint64_t txg = dmu_tx_get_txg(tx);
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	mutex_enter(&svr->svr_lock);

	/*
	 * Determine how big of a chunk to copy. We can allocate up
	 * to max_alloc bytes, and we can span up to vdev_removal_max_span
	 * bytes of unallocated space at a time. "segs" will track the
	 * allocated segments that we are copying. We may also be copying
	 * free segments (of up to vdev_removal_max_span bytes).
	 */
	range_tree_t *segs = range_tree_create(NULL, NULL);
	for (;;) {
		range_seg_t *rs = range_tree_first(svr->svr_allocd_segs);

		if (rs == NULL)
			break;

		uint64_t seg_length;

		if (range_tree_is_empty(segs)) {
			/* need to truncate the first seg based on max_alloc */
			seg_length =
			    MIN(rs->rs_end - rs->rs_start, *max_alloc);
		} else {
			if (rs->rs_start - range_tree_max(segs) >
			    vdev_removal_max_span) {
				/*
				 * Including this segment would cause us to
				 * copy a larger unneeded chunk than is allowed.
				 */
				break;
			} else if (rs->rs_end - range_tree_min(segs) >
			    *max_alloc) {
				/*
				 * This additional segment would extend past
				 * max_alloc. Rather than splitting this
				 * segment, leave it for the next mapping.
				 */
				break;
			} else {
				seg_length = rs->rs_end - rs->rs_start;
			}
		}

		range_tree_add(segs, rs->rs_start, seg_length);
		range_tree_remove(svr->svr_allocd_segs,
		    rs->rs_start, seg_length);
	}

	if (range_tree_is_empty(segs)) {
		mutex_exit(&svr->svr_lock);
		range_tree_destroy(segs);
		return;
	}

	if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
		dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
		    svr, 0, ZFS_SPACE_CHECK_NONE, tx);
	}

	svr->svr_max_offset_to_sync[txg & TXG_MASK] = range_tree_max(segs);

	/*
	 * Note: this is the amount of *allocated* space
	 * that we are taking care of each txg.
	 */
	svr->svr_bytes_done[txg & TXG_MASK] += range_tree_space(segs);

	mutex_exit(&svr->svr_lock);

	zio_alloc_list_t zal;
	metaslab_trace_init(&zal);
	uint64_t thismax = SPA_MAXBLOCKSIZE;
	while (!range_tree_is_empty(segs)) {
		int error = spa_vdev_copy_segment(vd,
		    segs, thismax, txg, vca, &zal);

		if (error == ENOSPC) {
			/*
			 * Cut our segment in half, and don't try this
			 * segment size again this txg. Note that the
			 * allocation size must be aligned to the highest
			 * ashift in the pool, so that the allocation will
			 * not be padded out to a multiple of the ashift,
			 * which could cause us to think that this mapping
			 * is larger than we intended.
			 */
			ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
			ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
			uint64_t attempted =
			    MIN(range_tree_span(segs), thismax);
			thismax = P2ROUNDUP(attempted / 2,
			    1 << spa->spa_max_ashift);
			/*
			 * The minimum-size allocation can not fail.
			 */
			ASSERT3U(attempted, >, 1 << spa->spa_max_ashift);
			*max_alloc = attempted - (1 << spa->spa_max_ashift);
		} else {
			ASSERT0(error);

			/*
			 * We've performed an allocation, so reset the
			 * alloc trace list.
			 */
			metaslab_trace_fini(&zal);
			metaslab_trace_init(&zal);
		}
	}
	metaslab_trace_fini(&zal);
	range_tree_destroy(segs);
}
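
/*
 * Backoff example for the ENOSPC path above (our annotation,
 * illustrative numbers): with a 16M chunk and ashift=12, a failed
 * allocation retries at thismax = P2ROUNDUP(8M, 4K) = 8M, then 4M,
 * and so on, while *max_alloc is lowered to attempted - 4K so that
 * later calls this txg will not retry a size that just failed. The
 * assertion that spa_max_ashift == spa_min_ashift guarantees the
 * halved size stays aligned for every vdev the allocator might pick.
 */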

/*
 * The removal thread operates in open context. It iterates over all
 * allocated space in the vdev, by loading each metaslab's spacemap.
 * For each contiguous segment of allocated space (capping the segment
 * size at SPA_MAXBLOCKSIZE), we:
 *  - Allocate space for it on another vdev.
 *  - Create a new mapping from the old location to the new location
 *    (as a record in svr_new_segments).
 *  - Initiate a physical read zio to get the data off the removing disk.
 *  - In the read zio's done callback, initiate a physical write zio to
 *    write it to the new vdev.
 * Note that all of this will take effect when a particular TXG syncs.
 * The sync thread ensures that all the phys reads and writes for the syncing
 * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
 * (see vdev_mapping_sync()).
 */
static void
spa_vdev_remove_thread(void *arg)
{
	spa_t *spa = arg;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_copy_arg_t vca;
	uint64_t max_alloc = zfs_remove_max_segment;
	uint64_t last_txg = 0;

	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);

	ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
	ASSERT(vdev_is_concrete(vd));
	ASSERT(vd->vdev_removing);
	ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
	ASSERT(vim != NULL);

	mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
	vca.vca_outstanding_bytes = 0;

	mutex_enter(&svr->svr_lock);

	/*
	 * Start from vim_max_offset so we pick up where we left off
	 * if we are restarting the removal after opening the pool.
	 */
	uint64_t msi;
	for (msi = start_offset >> vd->vdev_ms_shift;
	    msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];
		ASSERT3U(msi, <=, vd->vdev_ms_count);

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_sync_lock);
		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++) {
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		}

		/*
		 * If the metaslab has ever been allocated from
		 * (ms_sm != NULL), read the allocated segments from the
		 * space map object into svr_allocd_segs. Since we do this
		 * while holding svr_lock and ms_sync_lock, concurrent
		 * frees (which would have modified the space map) will
		 * wait for us to finish loading the spacemap, and then
		 * take the appropriate action
		 * (see free_from_removing_vdev()).
		 */
		if (msp->ms_sm != NULL) {
			space_map_t *sm = NULL;

			/*
			 * We have to open a new space map here, because
			 * ms_sm's sm_length and sm_alloc may not reflect
			 * what's in the object contents, if we are in between
			 * metaslab_sync() and metaslab_sync_done().
			 */
			VERIFY0(space_map_open(&sm,
			    spa->spa_dsl_pool->dp_meta_objset,
			    msp->ms_sm->sm_object, msp->ms_sm->sm_start,
			    msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
			space_map_update(sm);
			VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
			    SM_ALLOC));
			space_map_close(sm);

			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * When we are resuming from a paused removal (i.e.
			 * when importing a pool with a removal in progress),
			 * discard any state that we have already processed.
			 */
			range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
		}
		mutex_exit(&msp->ms_lock);
		mutex_exit(&msp->ms_sync_lock);

		vca.vca_msp = msp;
		zfs_dbgmsg("copying %llu segments for metaslab %llu",
		    avl_numnodes(&svr->svr_allocd_segs->rt_root),
		    msp->ms_id);

		while (!svr->svr_thread_exit &&
		    !range_tree_is_empty(svr->svr_allocd_segs)) {

			mutex_exit(&svr->svr_lock);

			/*
			 * We need to periodically drop the config lock so that
			 * writers can get in. Additionally, we can't wait
			 * for a txg to sync while holding a config lock
			 * (since a waiting writer could cause a 3-way deadlock
			 * with the sync thread, which also gets a config
			 * lock for reader). So we can't hold the config lock
			 * while calling dmu_tx_assign().
			 */
			spa_config_exit(spa, SCL_CONFIG, FTAG);

			/*
			 * This delay will pause the removal around the point
			 * specified by zfs_remove_max_bytes_pause. It is used
			 * solely by the test suite or during debugging.
			 */
			uint64_t bytes_copied =
			    spa->spa_removing_phys.sr_copied;
			for (int i = 0; i < TXG_SIZE; i++)
				bytes_copied += svr->svr_bytes_done[i];
			while (zfs_remove_max_bytes_pause <= bytes_copied &&
			    !svr->svr_thread_exit)
				delay(hz);

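			/*
			 * Throttle: wait for the copy I/O already in flight
			 * to drain below zfs_remove_max_copy_bytes before
			 * issuing more (vca_outstanding_bytes is decremented
			 * as copy writes complete).
			 */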
			mutex_enter(&vca.vca_lock);
			while (vca.vca_outstanding_bytes >
			    zfs_remove_max_copy_bytes) {
				cv_wait(&vca.vca_cv, &vca.vca_lock);
			}
			mutex_exit(&vca.vca_lock);

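			/*
			 * Create a tx against the MOS and assign it to a
			 * txg; the SPA_MAXBLOCKSIZE hold appears intended as
			 * a conservative reservation for the metadata this
			 * iteration may dirty.
			 */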
			dmu_tx_t *tx =
			    dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
			dmu_tx_hold_space(tx, SPA_MAXBLOCKSIZE);
			VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
			uint64_t txg = dmu_tx_get_txg(tx);

			/*
			 * Reacquire the vdev_config lock. The vdev_t
			 * that we're removing may have changed, e.g. due
			 * to a vdev_attach or vdev_detach.
			 */
			spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
			vd = vdev_lookup_top(spa, svr->svr_vdev_id);

			if (txg != last_txg)
				max_alloc = zfs_remove_max_segment;
			last_txg = txg;

			spa_vdev_copy_impl(vd, svr, &vca, &max_alloc, tx);

			dmu_tx_commit(tx);
			mutex_enter(&svr->svr_lock);
		}
	}

	mutex_exit(&svr->svr_lock);

	spa_config_exit(spa, SCL_CONFIG, FTAG);

	/*
	 * Wait for all copies to finish before cleaning up the vca.
	 */
	txg_wait_synced(spa->spa_dsl_pool, 0);
	ASSERT0(vca.vca_outstanding_bytes);

	mutex_destroy(&vca.vca_lock);
	cv_destroy(&vca.vca_cv);

	if (svr->svr_thread_exit) {
		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
		svr->svr_thread = NULL;
		cv_broadcast(&svr->svr_cv);
		mutex_exit(&svr->svr_lock);
	} else {
		ASSERT0(range_tree_space(svr->svr_allocd_segs));
		vdev_remove_complete(spa);
	}
}

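/*
 * Stop the removal thread and wait for it to exit. The removal state is
 * preserved, so the removal can later be resumed (see spa_restart_removal()).
 */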
void
spa_vdev_remove_suspend(spa_t *spa)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;

	if (svr == NULL)
		return;

	mutex_enter(&svr->svr_lock);
	svr->svr_thread_exit = B_TRUE;
	while (svr->svr_thread != NULL)
		cv_wait(&svr->svr_cv, &svr->svr_lock);
	svr->svr_thread_exit = B_FALSE;
	mutex_exit(&svr->svr_lock);
}

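/*
 * Verify that a removal is in progress before the cancel sync task runs.
 */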
/* ARGSUSED */
static int
spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);
	return (0);
}

/*
 * Cancel a removal by freeing all entries from the partial mapping
 * and marking the vdev as no longer removing.
 */
/* ARGSUSED */
static void
spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
{
	spa_t *spa = dmu_tx_pool(tx)->dp_spa;
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
	vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
	objset_t *mos = spa->spa_meta_objset;

	ASSERT3P(svr->svr_thread, ==, NULL);

	spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);

	boolean_t are_precise;
	VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
	if (are_precise) {
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
	}

	uint64_t obsolete_sm_object;
	VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
	if (obsolete_sm_object != 0) {
		ASSERT(vd->vdev_obsolete_sm != NULL);
		ASSERT3U(obsolete_sm_object, ==,
		    space_map_object(vd->vdev_obsolete_sm));

		space_map_free(vd->vdev_obsolete_sm, tx);
		VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
		    VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
		space_map_close(vd->vdev_obsolete_sm);
		vd->vdev_obsolete_sm = NULL;
		spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
	}
	for (int i = 0; i < TXG_SIZE; i++) {
		ASSERT(list_is_empty(&svr->svr_new_segments[i]));
		ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
		    vdev_indirect_mapping_max_offset(vim));
	}

	for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
		metaslab_t *msp = vd->vdev_ms[msi];

		if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
			break;

		ASSERT0(range_tree_space(svr->svr_allocd_segs));

		mutex_enter(&msp->ms_lock);

		/*
		 * Assert nothing in flight -- ms_*tree is empty.
		 */
		for (int i = 0; i < TXG_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_allocating[i]));
		for (int i = 0; i < TXG_DEFER_SIZE; i++)
			ASSERT0(range_tree_space(msp->ms_defer[i]));
		ASSERT0(range_tree_space(msp->ms_freed));

		if (msp->ms_sm != NULL) {
			/*
			 * Assert that the in-core spacemap has the same
			 * length as the on-disk one, so we can use the
			 * existing in-core spacemap to load it from disk.
			 */
			ASSERT3U(msp->ms_sm->sm_alloc, ==,
			    msp->ms_sm->sm_phys->smp_alloc);
			ASSERT3U(msp->ms_sm->sm_length, ==,
			    msp->ms_sm->sm_phys->smp_objsize);

			mutex_enter(&svr->svr_lock);
			VERIFY0(space_map_load(msp->ms_sm,
			    svr->svr_allocd_segs, SM_ALLOC));
			range_tree_walk(msp->ms_freeing,
			    range_tree_remove, svr->svr_allocd_segs);

			/*
			 * Clear everything past what has been synced,
			 * because we have not allocated mappings for it yet.
			 */
			uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
			uint64_t sm_end = msp->ms_sm->sm_start +
			    msp->ms_sm->sm_size;
			if (sm_end > syncd)
				range_tree_clear(svr->svr_allocd_segs,
				    syncd, sm_end - syncd);

			mutex_exit(&svr->svr_lock);
		}
		mutex_exit(&msp->ms_lock);

		mutex_enter(&svr->svr_lock);
		range_tree_vacate(svr->svr_allocd_segs,
		    free_mapped_segment_cb, vd);
		mutex_exit(&svr->svr_lock);
	}

	/*
	 * Note: this must happen after we invoke free_mapped_segment_cb,
	 * because it adds to the obsolete_segments.
	 */
	range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);

	ASSERT3U(vic->vic_mapping_object, ==,
	    vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
	vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
	vd->vdev_indirect_mapping = NULL;
	vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
	vic->vic_mapping_object = 0;

	ASSERT3U(vic->vic_births_object, ==,
	    vdev_indirect_births_object(vd->vdev_indirect_births));
	vdev_indirect_births_close(vd->vdev_indirect_births);
	vd->vdev_indirect_births = NULL;
	vdev_indirect_births_free(mos, vic->vic_births_object, tx);
	vic->vic_births_object = 0;

	/*
	 * We may have processed some frees from the removing vdev in this
	 * txg, thus increasing svr_bytes_done; discard that here to
	 * satisfy the assertions in spa_vdev_removal_destroy().
	 * Note that future TXGs cannot have any bytes_done, because
	 * future TXGs are only modified from open context, and we have
	 * already shut down the copying thread.
	 */
	svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
	spa_finish_removal(spa, DSS_CANCELED, tx);

	vd->vdev_removing = B_FALSE;
	vdev_config_dirty(vd);

	zfs_dbgmsg("canceled device removal for vdev %llu in %llu",
	    vd->vdev_id, dmu_tx_get_txg(tx));
	spa_history_log_internal(spa, "vdev remove canceled", tx,
	    "%s vdev %llu %s", spa_name(spa),
	    vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
}

int
spa_vdev_remove_cancel(spa_t *spa)
{
	spa_vdev_remove_suspend(spa);

	if (spa->spa_vdev_removal == NULL)
		return (ENOTACTIVE);

	uint64_t vdid = spa->spa_vdev_removal->svr_vdev_id;

	int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
	    spa_vdev_remove_cancel_sync, NULL, 0,
	    ZFS_SPACE_CHECK_EXTRA_RESERVED);

	if (error == 0) {
		spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
		vdev_t *vd = vdev_lookup_top(spa, vdid);
		metaslab_group_activate(vd->vdev_mg);
		spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
	}

	return (error);
}

/*
 * Called every sync pass of every txg if there is an svr
 * (i.e. a removal in progress).
 */
void
svr_sync(spa_t *spa, dmu_tx_t *tx)
{
	spa_vdev_removal_t *svr = spa->spa_vdev_removal;
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * This check is necessary so that we do not dirty the
	 * DIRECTORY_OBJECT via spa_sync_removing_state() when there
	 * is nothing to do. Dirtying it every time would prevent us
	 * from syncing-to-convergence.
	 */
	if (svr->svr_bytes_done[txgoff] == 0)
		return;

	/*
	 * Update progress accounting.
	 */
	spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
	svr->svr_bytes_done[txgoff] = 0;

	spa_sync_removing_state(spa, tx);
}

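/*
 * Replace the removed top-level vdev's slot in the vdev namespace with a
 * hole vdev (or compact the children if it occupied the last slot), then
 * reassess the health of the root vdev.
 */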
static void
vdev_remove_make_hole_and_free(vdev_t *vd)
{
	uint64_t id = vd->vdev_id;
	spa_t *spa = vd->vdev_spa;
	vdev_t *rvd = spa->spa_root_vdev;
	boolean_t last_vdev = (id == (rvd->vdev_children - 1));

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	vdev_free(vd);

	if (last_vdev) {
		vdev_compact_children(rvd);
	} else {
		vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
		vdev_add_child(rvd, vd);
	}
	vdev_config_dirty(rvd);

	/*
	 * Reassess the health of our root vdev.
	 */
	vdev_reopen(rvd);
}

/*
 * Remove a log device. The config lock is held for the specified TXG.
 */
static int
spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
{
	metaslab_group_t *mg = vd->vdev_mg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;

	ASSERT(vd->vdev_islog);
	ASSERT(vd == vd->vdev_top);

	/*
	 * Stop allocating from this vdev.
	 */
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * Evacuate the device. We don't hold the config lock as writer
	 * since we need to do I/O, but we do keep the
	 * spa_namespace_lock held. Once this completes the device
	 * should no longer have any blocks allocated on it.
	 */
	if (vd->vdev_islog) {
		if (vd->vdev_stat.vs_alloc != 0)
			error = spa_reset_logs(spa);
	}

	*txg = spa_vdev_config_enter(spa);

	if (error != 0) {
		metaslab_group_activate(mg);
		return (error);
	}
	ASSERT0(vd->vdev_stat.vs_alloc);

	/*
	 * The evacuation succeeded. Remove any remaining MOS metadata
	 * associated with this vdev, and wait for these changes to sync.
	 */
	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);

	spa_history_log_internal(spa, "vdev remove", NULL,
	    "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
	    (vd->vdev_path != NULL) ? vd->vdev_path : "-");

	spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);

	*txg = spa_vdev_config_enter(spa);

	sysevent_t *ev = spa_event_create(spa, vd, NULL,
	    ESC_ZFS_VDEV_REMOVE_DEV);
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);

	/* The top ZAP should have been destroyed by vdev_remove_empty. */
	ASSERT0(vd->vdev_top_zap);
	/* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
	ASSERT0(vd->vdev_leaf_zap);

	(void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);

	if (list_link_active(&vd->vdev_state_dirty_node))
		vdev_state_clean(vd);
	if (list_link_active(&vd->vdev_config_dirty_node))
		vdev_config_clean(vd);

	/*
	 * Clean up the vdev namespace.
	 */
	vdev_remove_make_hole_and_free(vd);

	if (ev != NULL)
		spa_event_post(ev);

	return (0);
}

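/*
 * Check that a top-level vdev may be removed: the feature must be enabled,
 * the pool must have enough free space to absorb the vdev's data, no other
 * removal may be in progress, and the pool layout must be supported
 * (uniform ashift, no raidz, and mirrors composed only of leaf vdevs).
 */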
static int
spa_vdev_remove_top_check(vdev_t *vd)
{
	spa_t *spa = vd->vdev_spa;

	if (vd != vd->vdev_top)
		return (SET_ERROR(ENOTSUP));

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
		return (SET_ERROR(ENOTSUP));

	/* available space in the pool's normal class */
	uint64_t available = dsl_dir_space_available(
	    spa->spa_dsl_pool->dp_root_dir, NULL, 0, B_TRUE);

	metaslab_class_t *mc = vd->vdev_mg->mg_class;

	/*
	 * When removing a vdev from an allocation class that has
	 * remaining vdevs, include available space from the class.
	 */
	if (mc != spa_normal_class(spa) && mc->mc_groups > 1) {
		uint64_t class_avail = metaslab_class_get_space(mc) -
		    metaslab_class_get_alloc(mc);

		/* add class space, adjusted for overhead */
		available += (class_avail * 94) / 100;
	}

	/*
	 * There has to be enough free space to remove the
	 * device and leave double the "slop" space (i.e. we
	 * must leave at least 3% of the pool free, in addition to
	 * the normal slop space).
	 */
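	/*
	 * Illustrative: removing a 1T vdev requires at least 1T of free
	 * space in the rest of the pool, plus the slop, since the check
	 * below uses the vdev's full deflated capacity (vs_dspace) rather
	 * than just its allocated bytes.
	 */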
	if (available < vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
		return (SET_ERROR(ENOSPC));
	}

	/*
	 * There cannot be a removal in progress.
	 */
	if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
		return (SET_ERROR(EBUSY));

	/*
	 * The device must have all its data.
	 */
	if (!vdev_dtl_empty(vd, DTL_MISSING) ||
	    !vdev_dtl_empty(vd, DTL_OUTAGE))
		return (SET_ERROR(EBUSY));

	/*
	 * The device must be healthy.
	 */
	if (!vdev_readable(vd))
		return (SET_ERROR(EIO));

	/*
	 * All vdevs in normal class must have the same ashift.
	 */
	if (spa->spa_max_ashift != spa->spa_min_ashift) {
		return (SET_ERROR(EINVAL));
	}

	/*
	 * In addition to the ashift check above, no concrete top-level
	 * vdev may be raidz, and mirrors must consist only of leaf vdevs.
	 */
	vdev_t *rvd = spa->spa_root_vdev;
	int num_indirect = 0;
	for (uint64_t id = 0; id < rvd->vdev_children; id++) {
		vdev_t *cvd = rvd->vdev_child[id];
		if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
			ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
		if (cvd->vdev_ops == &vdev_indirect_ops)
			num_indirect++;
		if (!vdev_is_concrete(cvd))
			continue;
		if (cvd->vdev_ops == &vdev_raidz_ops)
			return (SET_ERROR(EINVAL));
		/*
		 * The mirror must consist only of leaf vdevs.
		 */
		if (cvd->vdev_ops == &vdev_mirror_ops) {
			for (uint64_t cid = 0;
			    cid < cvd->vdev_children; cid++) {
				if (!cvd->vdev_child[cid]->vdev_ops->
				    vdev_op_leaf)
					return (SET_ERROR(EINVAL));
			}
		}
	}

	return (0);
}

/*
 * Initiate removal of a top-level vdev, reducing the total space in the pool.
 * The config lock is held for the specified TXG. Once initiated,
 * evacuation of all allocated space (copying it to other vdevs) happens
 * in the background (see spa_vdev_remove_thread()), and can be canceled
 * (see spa_vdev_remove_cancel()). If successful, the vdev will
 * be transformed to an indirect vdev (see vdev_remove_complete()).
 */
static int
spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
{
	spa_t *spa = vd->vdev_spa;
	int error;

	/*
	 * Check for errors up-front, so that we don't waste time
	 * passivating the metaslab group and clearing the ZIL if there
	 * are errors.
	 */
	error = spa_vdev_remove_top_check(vd);
	if (error != 0)
		return (error);

	/*
	 * Stop allocating from this vdev. Note that we must check
	 * that this is not the only device in the pool before
	 * passivating, otherwise we will not be able to make
	 * progress because we can't allocate from any vdevs.
	 * The above check for sufficient free space serves this
	 * purpose.
	 */
	metaslab_group_t *mg = vd->vdev_mg;
	metaslab_group_passivate(mg);

	/*
	 * Wait for the youngest allocations and frees to sync,
	 * and then wait for the deferral of those frees to finish.
	 */
	spa_vdev_config_exit(spa, NULL,
	    *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);

	/*
	 * We must ensure that no "stubby" log blocks are allocated
	 * on the device to be removed. These blocks could be
	 * written at any time, including while we are in the middle
	 * of copying them.
	 */
	error = spa_reset_logs(spa);

	*txg = spa_vdev_config_enter(spa);

	/*
	 * Things might have changed while the config lock was dropped
	 * (e.g. space usage). Check for errors again.
	 */
	if (error == 0)
		error = spa_vdev_remove_top_check(vd);

	if (error != 0) {
		metaslab_group_activate(mg);
		return (error);
	}

	vd->vdev_removing = B_TRUE;

	vdev_dirty_leaves(vd, VDD_DTL, *txg);
	vdev_config_dirty(vd);
	dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
	dsl_sync_task_nowait(spa->spa_dsl_pool,
	    vdev_remove_initiate_sync,
	    (void *)(uintptr_t)vd->vdev_id, 0, ZFS_SPACE_CHECK_NONE, tx);
	dmu_tx_commit(tx);

	return (0);
}

/*
 * Remove a device from the pool.
 *
 * Removing a device from the vdev namespace requires several steps
 * and can take a significant amount of time. As a result we use
 * the spa_vdev_config_[enter/exit] functions which allow us to
 * grab and release the spa_config_lock while still holding the namespace
 * lock. During each step the configuration is synced out.
 */
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
{
	vdev_t *vd;
	nvlist_t **spares, **l2cache, *nv;
	uint64_t txg = 0;
	uint_t nspares, nl2cache;
	int error = 0;
	boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
	sysevent_t *ev = NULL;

	ASSERT(spa_writeable(spa));

	if (!locked)
		txg = spa_vdev_enter(spa);

	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
		error = (spa_has_checkpoint(spa)) ?
		    ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;

		if (!locked)
			return (spa_vdev_exit(spa, NULL, txg, error));

		return (error);
	}

	vd = spa_lookup_by_guid(spa, guid, B_FALSE);

	if (spa->spa_spares.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
	    ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
		/*
		 * Only remove the hot spare if it's not currently in use
		 * in this pool.
		 */
		if (vd == NULL || unspare) {
			if (vd == NULL)
				vd = spa_lookup_by_guid(spa, guid, B_TRUE);
			ev = spa_event_create(spa, vd, NULL,
			    ESC_ZFS_VDEV_REMOVE_AUX);

			char *nvstr = fnvlist_lookup_string(nv,
			    ZPOOL_CONFIG_PATH);
			spa_history_log_internal(spa, "vdev remove", NULL,
			    "%s vdev (%s) %s", spa_name(spa),
			    VDEV_TYPE_SPARE, nvstr);
			spa_vdev_remove_aux(spa->spa_spares.sav_config,
			    ZPOOL_CONFIG_SPARES, spares, nspares, nv);
			spa_load_spares(spa);
			spa->spa_spares.sav_sync = B_TRUE;
		} else {
			error = SET_ERROR(EBUSY);
		}
	} else if (spa->spa_l2cache.sav_vdevs != NULL &&
	    nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
	    ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
	    (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
		char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
		spa_history_log_internal(spa, "vdev remove", NULL,
		    "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
		/*
		 * Cache devices can always be removed.
		 */
		vd = spa_lookup_by_guid(spa, guid, B_TRUE);
		ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
		spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
		    ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
		spa_load_l2cache(spa);
		spa->spa_l2cache.sav_sync = B_TRUE;
	} else if (vd != NULL && vd->vdev_islog) {
		ASSERT(!locked);
		error = spa_vdev_remove_log(vd, &txg);
	} else if (vd != NULL) {
		ASSERT(!locked);
		error = spa_vdev_remove_top(vd, &txg);
	} else {
		/*
		 * There is no vdev of any kind with the specified guid.
		 */
		error = SET_ERROR(ENOENT);
	}

	if (!locked)
		error = spa_vdev_exit(spa, NULL, txg, error);

	if (ev != NULL)
		spa_event_post(ev);

	return (error);
}

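/*
 * Report removal progress: state, timestamps, bytes copied, and the in-core
 * memory consumed by the indirect mappings of previously removed vdevs
 * (consumed by userland, e.g. zpool status).
 */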
int
spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
{
	prs->prs_state = spa->spa_removing_phys.sr_state;

	if (prs->prs_state == DSS_NONE)
		return (SET_ERROR(ENOENT));

	prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
	prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
	prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
	prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
	prs->prs_copied = spa->spa_removing_phys.sr_copied;

	if (spa->spa_vdev_removal != NULL) {
		for (int i = 0; i < TXG_SIZE; i++) {
			prs->prs_copied +=
			    spa->spa_vdev_removal->svr_bytes_done[i];
		}
	}

	prs->prs_mapping_memory = 0;
	uint64_t indirect_vdev_id =
	    spa->spa_removing_phys.sr_prev_indirect_vdev;
	while (indirect_vdev_id != -1) {
		vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
		vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
		vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;

		ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
		prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
		indirect_vdev_id = vic->vic_prev_indirect_vdev;
	}

	return (0);
}

#if defined(_KERNEL)
module_param(zfs_remove_max_segment, int, 0644);
MODULE_PARM_DESC(zfs_remove_max_segment,
	"Largest contiguous segment to allocate when removing device");

module_param(vdev_removal_max_span, int, 0644);
MODULE_PARM_DESC(vdev_removal_max_span,
	"Largest span of free chunks a remap segment can span");

/* BEGIN CSTYLED */
module_param(zfs_remove_max_bytes_pause, ulong, 0644);
MODULE_PARM_DESC(zfs_remove_max_bytes_pause,
	"Pause device removal after this many bytes are copied "
	"(debug use only - causes removal to hang)");
/* END CSTYLED */
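
/*
 * Illustrative usage: when the zfs module is loaded, these tunables appear
 * under /sys/module/zfs/parameters and can be adjusted at runtime, e.g.:
 *
 *	echo 16777216 > /sys/module/zfs/parameters/zfs_remove_max_segment
 */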

EXPORT_SYMBOL(free_from_removing_vdev);
EXPORT_SYMBOL(spa_removal_get_stats);
EXPORT_SYMBOL(spa_remove_init);
EXPORT_SYMBOL(spa_restart_removal);
EXPORT_SYMBOL(spa_vdev_removal_destroy);
EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_remove_cancel);
EXPORT_SYMBOL(spa_vdev_remove_suspend);
EXPORT_SYMBOL(svr_sync);
#endif