 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2016 by Delphix. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
/*
 * Maximum number of metaslabs per group that can be initialized
 * simultaneously.
 */
int max_initialize_ms = 3;
/*
 * Value that is written to disk during initialization.
 */
#ifdef _ILP32
unsigned long zfs_initialize_value = 0xdeadbeefUL;
#else
unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
#endif
/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;

/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
uint64_t zfs_initialize_chunk_size = 1024 * 1024;
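/*
 * Check whether the initializing thread should exit early: an exit was
 * requested, the vdev is no longer writeable or has been detached, or its
 * top-level vdev is being removed.
 */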
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing);
}
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}

	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}
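/*
 * Transition the initialize state machine and persist the change: the new
 * state is pushed to the leaf vdev ZAP via a sync task, and the transition
 * is recorded in the pool history log.
 */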
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);
}
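/*
 * Completion callback for an initializing write. On ENXIO against an
 * unwriteable vdev, the per-txg offset is rolled back; otherwise errors are
 * counted and the bytes written are added to the progress total. In all
 * cases, the inflight slot taken in vdev_initialize_write() is released.
 */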
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}
/*
 * Translate a logical range to the physical range for the specified vdev_t.
 * This function is initially called with a leaf vdev and will walk each
 * parent vdev until it reaches a top-level vdev. Once the top-level is
 * reached the physical range is initialized and the recursive function
 * begins to unwind. As it unwinds it calls the parent's vdev specific
 * translation function to do the real conversion.
 */
void
vdev_xlate(vdev_t *vd, const range_seg_t *logical_rs, range_seg_t *physical_rs)
{
	/*
	 * Walk up the vdev tree
	 */
	if (vd != vd->vdev_top) {
		vdev_xlate(vd->vdev_parent, logical_rs, physical_rs);
	} else {
		/*
		 * We've reached the top-level vdev, initialize the
		 * physical range to the logical range and start to
		 * unwind.
		 */
		physical_rs->rs_start = logical_rs->rs_start;
		physical_rs->rs_end = logical_rs->rs_end;
		return;
	}

	vdev_t *pvd = vd->vdev_parent;
	ASSERT3P(pvd, !=, NULL);
	ASSERT3P(pvd->vdev_ops->vdev_op_xlate, !=, NULL);

	/*
	 * As this recursive function unwinds, translate the logical
	 * range into its physical components by calling the
	 * vdev specific translate function.
	 */
	range_seg_t intermediate = { { { 0, 0 } } };
	pvd->vdev_ops->vdev_op_xlate(vd, physical_rs, &intermediate);

	physical_rs->rs_start = intermediate.rs_start;
	physical_rs->rs_end = intermediate.rs_end;
}
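/*
 * As a rough sketch of the per-vdev translation callbacks (not an
 * exhaustive description): for a leaf under a mirror the translation is
 * essentially an identity mapping, so the physical range matches the
 * logical range, while under raidz the range is narrowed to roughly the
 * portion of the columns that land on this particular child.
 */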
/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	ASSERT0(len % sizeof (uint64_t));
#ifdef _ILP32
	for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) {
		*(uint32_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#else
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#endif
	return (0);
}
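/*
 * Allocate a zfs_initialize_chunk_size buffer and fill it once with the
 * pattern; the same buffer is then reused for every initializing write.
 */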
static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}
static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
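/*
 * Issue initializing writes for every segment in the vdev's range tree,
 * splitting each segment into chunks of at most zfs_initialize_chunk_size
 * and stopping at the first error.
 */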
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	avl_tree_t *rt = &vd->vdev_initialize_tree->rt_root;

	for (range_seg_t *rs = avl_first(rt); rs != NULL;
	    rs = AVL_NEXT(rt, rs)) {
		uint64_t size = rs->rs_end - rs->rs_start;

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs->rs_start +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);
			if (error != 0)
				return (error);
		}
	}

	return (0);
}
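/*
 * Wait for the metaslab's space map to finish loading, loading it
 * ourselves if no one else already has.
 */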
static void
vdev_initialize_ms_load(metaslab_t *msp)
{
	ASSERT(MUTEX_HELD(&msp->ms_lock));

	metaslab_load_wait(msp);
	if (!msp->ms_loaded)
		VERIFY0(metaslab_load(msp));
}
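/* Wait for any in-flight update of the metaslab group's counters to settle. */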
static void
vdev_initialize_mg_wait(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	while (mg->mg_initialize_updating) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
}
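/*
 * Reserve an initializing slot in the metaslab group, blocking while
 * max_initialize_ms metaslabs in the group are already being initialized.
 */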
static void
vdev_initialize_mg_mark(metaslab_group_t *mg)
{
	ASSERT(MUTEX_HELD(&mg->mg_ms_initialize_lock));
	ASSERT(mg->mg_initialize_updating);

	while (mg->mg_ms_initializing >= max_initialize_ms) {
		cv_wait(&mg->mg_ms_initialize_cv, &mg->mg_ms_initialize_lock);
	}
	mg->mg_ms_initializing++;
	ASSERT3U(mg->mg_ms_initializing, <=, max_initialize_ms);
}
/*
 * Mark the metaslab as being initialized to prevent any allocations
 * on this metaslab. We must also track how many metaslabs are currently
 * being initialized within a metaslab group and limit them to prevent
 * allocation failures from occurring because all metaslabs are being
 * initialized.
 */
static void
vdev_initialize_ms_mark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;

	mutex_enter(&mg->mg_ms_initialize_lock);

	/*
	 * To keep an accurate count of how many threads are initializing
	 * a specific metaslab group, we only allow one thread to mark
	 * the metaslab group at a time. This ensures that the value of
	 * ms_initializing will be accurate when we decide to mark a metaslab
	 * group as being initialized. To do this we force all other threads
	 * to wait till the metaslab's mg_initialize_updating flag is no
	 * longer set.
	 */
	vdev_initialize_mg_wait(mg);
	mg->mg_initialize_updating = B_TRUE;
	if (msp->ms_initializing == 0) {
		vdev_initialize_mg_mark(mg);
	}
	mutex_enter(&msp->ms_lock);
	msp->ms_initializing++;
	mutex_exit(&msp->ms_lock);

	mg->mg_initialize_updating = B_FALSE;
	cv_broadcast(&mg->mg_ms_initialize_cv);
	mutex_exit(&mg->mg_ms_initialize_lock);
}
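/*
 * Drop this thread's reference on the metaslab; when the last reference is
 * dropped, release the group's initializing slot and wake any waiters.
 */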
static void
vdev_initialize_ms_unmark(metaslab_t *msp)
{
	ASSERT(!MUTEX_HELD(&msp->ms_lock));
	metaslab_group_t *mg = msp->ms_group;
	mutex_enter(&mg->mg_ms_initialize_lock);
	mutex_enter(&msp->ms_lock);
	if (--msp->ms_initializing == 0) {
		mg->mg_ms_initializing--;
		cv_broadcast(&mg->mg_ms_initialize_cv);
	}
	mutex_exit(&msp->ms_lock);
	mutex_exit(&mg->mg_ms_initialize_lock);
}
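/*
 * Recompute the progress counters: vdev_initialize_bytes_est is the free
 * space that will be written, and vdev_initialize_bytes_done the portion
 * already covered by vdev_initialize_last_offset. Metaslabs that lie
 * entirely before or after the last offset are accounted wholesale; the
 * metaslab containing the offset is loaded and walked segment by segment.
 */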
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = msp->ms_size -
		    space_map_allocated(msp->ms_sm);

		if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
			ms_free /= vd->vdev_top->vdev_children;

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg_t logical_rs, physical_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;
		vdev_xlate(vd, &logical_rs, &physical_rs);

		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (vd->vdev_initialize_last_offset >
		    physical_rs.rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		vdev_initialize_ms_load(msp);

		for (range_seg_t *rs = avl_first(&msp->ms_allocatable->rt_root);
		    rs; rs = AVL_NEXT(&msp->ms_allocatable->rt_root, rs)) {
			logical_rs.rs_start = rs->rs_start;
			logical_rs.rs_end = rs->rs_end;
			vdev_xlate(vd, &logical_rs, &physical_rs);

			uint64_t size = physical_rs.rs_end -
			    physical_rs.rs_start;
			vd->vdev_initialize_bytes_est += size;
			if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done += size;
			} else if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_start &&
			    vd->vdev_initialize_last_offset <
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done +=
				    vd->vdev_initialize_last_offset -
				    physical_rs.rs_start;
			}
		}
		mutex_exit(&msp->ms_lock);
	}
}
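/*
 * On resume, read the last initialized offset back from the leaf vdev ZAP
 * so that an active or suspended initialization continues where it left
 * off, then recompute the progress counters.
 */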
static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}
/*
 * Convert the logical range into a physical range and add it to our
 * avl tree.
 */
void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg_t logical_rs, physical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate(vd, &logical_rs, &physical_rs);

	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_start == physical_rs.rs_start);
	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_end == physical_rs.rs_end);

	/* Only add segments that we have not visited yet */
	if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs.rs_start,
		    (u_longlong_t)physical_rs.rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs.rs_end);
		ASSERT3U(physical_rs.rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs.rs_start = vd->vdev_initialize_last_offset;
	}
	ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

	/*
	 * With raidz, it's possible that the logical range does not live on
	 * this leaf vdev. We only add the physical range to this vdev's tree
	 * if it has a length greater than 0.
	 */
	if (physical_rs.rs_end > physical_rs.rs_start) {
		range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
		    physical_rs.rs_end - physical_rs.rs_start);
	} else {
		ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
	}
}
static void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, NULL);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		vdev_initialize_ms_mark(msp);
		mutex_enter(&msp->ms_lock);
		vdev_initialize_ms_load(msp);

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		error = vdev_initialize_ranges(vd, deadbeef);
		vdev_initialize_ms_unmark(msp);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
		vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);
}
/*
 * Initiates the initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. Blocks until the initializing thread has exited.
 * Caller must hold vdev_initialize_lock and must not be writing to the spa
 * config, as the initializing thread may try to enter the config as a reader
 * before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	ASSERTV(spa_t *spa = vd->vdev_spa);
	ASSERT(!spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_WRITER));

	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;
	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state);
	}
}
/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	vdev_initialize_stop_all_impl(vd, tgt_state);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}
}
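/*
 * Restart initialization after pool import or a device coming online,
 * walking the vdev tree recursively: resume an active initialization, or
 * just load its progress for reporting if it is suspended or the device
 * is offline.
 */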
void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = (time_t)timestamp;

		if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd)) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}
#if defined(_KERNEL)
EXPORT_SYMBOL(vdev_initialize_restart);
EXPORT_SYMBOL(vdev_xlate);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_initialize_stop);

/* BEGIN CSTYLED */
module_param(zfs_initialize_value, ulong, 0644);
MODULE_PARM_DESC(zfs_initialize_value,
	"Value written during zpool initialize");
/* END CSTYLED */
#endif