/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2016, 2019 by Delphix. All rights reserved.
 */
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/txg.h>
#include <sys/vdev_impl.h>
#include <sys/refcount.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_synctask.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/vdev_initialize.h>
/*
 * Value that is written to disk during initialization.
 */
#ifdef _ILP32
unsigned long zfs_initialize_value = 0xdeadbeefUL;
#else
unsigned long zfs_initialize_value = 0xdeadbeefdeadbeeeULL;
#endif
/* maximum number of I/Os outstanding per leaf vdev */
int zfs_initialize_limit = 1;
/* size of initializing writes; default 1MiB, see zfs_remove_max_segment */
unsigned long zfs_initialize_chunk_size = 1024 * 1024;
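/*
 * Check whether the initializing thread should stop: an explicit exit was
 * requested, the vdev is no longer writeable, it has been detached, or its
 * top-level vdev is being removed.
 */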
static boolean_t
vdev_initialize_should_stop(vdev_t *vd)
{
	return (vd->vdev_initialize_exit_wanted || !vdev_writeable(vd) ||
	    vd->vdev_detached || vd->vdev_top->vdev_removing);
}
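/*
 * Sync task that persists the initializing state for a leaf vdev (last
 * offset written, action time, and current state) into its leaf ZAP object.
 */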
static void
vdev_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	/*
	 * We pass in the guid instead of the vdev_t since the vdev may
	 * have been freed prior to the sync task being processed. This
	 * happens when a vdev is detached as we call spa_config_vdev_exit(),
	 * stop the initializing thread, schedule the sync task, and free
	 * the vdev. Later when the scheduled sync task is invoked, it would
	 * find that the vdev has been freed.
	 */
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);
	kmem_free(arg, sizeof (uint64_t));

	vdev_t *vd = spa_lookup_by_guid(tx->tx_pool->dp_spa, guid, B_FALSE);
	if (vd == NULL || vd->vdev_top->vdev_removing || !vdev_is_concrete(vd))
		return;

	uint64_t last_offset = vd->vdev_initialize_offset[txg & TXG_MASK];
	vd->vdev_initialize_offset[txg & TXG_MASK] = 0;

	VERIFY(vd->vdev_leaf_zap != 0);

	objset_t *mos = vd->vdev_spa->spa_meta_objset;

	if (last_offset > 0) {
		vd->vdev_initialize_last_offset = last_offset;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (last_offset), 1, &last_offset, tx));
	}

	if (vd->vdev_initialize_action_time > 0) {
		uint64_t val = (uint64_t)vd->vdev_initialize_action_time;
		VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
		    VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME, sizeof (val),
		    1, &val, tx));
	}

	uint64_t initialize_state = vd->vdev_initialize_state;
	VERIFY0(zap_update(mos, vd->vdev_leaf_zap,
	    VDEV_LEAF_ZAP_INITIALIZE_STATE, sizeof (initialize_state), 1,
	    &initialize_state, tx));
}
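/*
 * Transition a vdev to a new initializing state, schedule a sync task to
 * persist the change, and record the transition in the pool history log.
 */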
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid, this will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_NONE, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);

	if (new_state != VDEV_INITIALIZE_ACTIVE)
		spa_notify_waiters(spa);
}
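/*
 * Completion callback for each initializing write zio; updates the progress
 * counters and, if the vdev became unwriteable, rolls back the per-txg
 * offset.
 */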
static void
vdev_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	mutex_enter(&vd->vdev_initialize_io_lock);
	if (zio->io_error == ENXIO && !vdev_writeable(vd)) {
		/*
		 * The I/O failed because the vdev was unavailable; roll the
		 * last offset back. (This works because spa_sync waits on
		 * spa_txg_zio before it runs sync tasks.)
		 */
		uint64_t *off =
		    &vd->vdev_initialize_offset[zio->io_txg & TXG_MASK];
		*off = MIN(*off, zio->io_offset);
	} else {
		/*
		 * Since initializing is best-effort, we ignore I/O errors and
		 * rely on vdev_probe to determine if the errors are more
		 * critical.
		 */
		if (zio->io_error != 0)
			vd->vdev_stat.vs_initialize_errors++;

		vd->vdev_initialize_bytes_done += zio->io_orig_size;
	}

	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}
/*
 * Takes care of physical writing and limiting # of concurrent ZIOs.
 */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}
/*
 * Callback to fill each ABD chunk with zfs_initialize_value. len must be
 * divisible by sizeof (uint64_t), and buf must be 8-byte aligned. The ABD
 * allocation will guarantee these for us.
 */
static int
vdev_initialize_block_fill(void *buf, size_t len, void *unused)
{
	ASSERT0(len % sizeof (uint64_t));
#ifdef _ILP32
	for (uint64_t i = 0; i < len; i += sizeof (uint32_t)) {
		*(uint32_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#else
	for (uint64_t i = 0; i < len; i += sizeof (uint64_t)) {
		*(uint64_t *)((char *)(buf) + i) = zfs_initialize_value;
	}
#endif
	return (0);
}
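/* Allocate an ABD and fill it with the zfs_initialize_value pattern. */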
static abd_t *
vdev_initialize_block_alloc(void)
{
	/* Allocate ABD for filler data */
	abd_t *data = abd_alloc_for_io(zfs_initialize_chunk_size, B_FALSE);

	ASSERT0(zfs_initialize_chunk_size % sizeof (uint64_t));
	(void) abd_iterate_func(data, 0, zfs_initialize_chunk_size,
	    vdev_initialize_block_fill, NULL);

	return (data);
}
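/* Free the filler-pattern ABD allocated by vdev_initialize_block_alloc(). */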
static void
vdev_initialize_block_free(abd_t *data)
{
	abd_free(data);
}
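/*
 * Walk the vdev's initialize range tree and issue a write of at most
 * zfs_initialize_chunk_size bytes for each piece of every segment.
 */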
static int
vdev_initialize_ranges(vdev_t *vd, abd_t *data)
{
	range_tree_t *rt = vd->vdev_initialize_tree;
	zfs_btree_t *bt = &rt->rt_root;
	zfs_btree_index_t where;

	for (range_seg_t *rs = zfs_btree_first(bt, &where); rs != NULL;
	    rs = zfs_btree_next(bt, &where, &where)) {
		uint64_t size = rs_get_end(rs, rt) - rs_get_start(rs, rt);

		/* Split range into legally-sized physical chunks */
		uint64_t writes_required =
		    ((size - 1) / zfs_initialize_chunk_size) + 1;

		for (uint64_t w = 0; w < writes_required; w++) {
			int error;

			error = vdev_initialize_write(vd,
			    VDEV_LABEL_START_SIZE + rs_get_start(rs, rt) +
			    (w * zfs_initialize_chunk_size),
			    MIN(size - (w * zfs_initialize_chunk_size),
			    zfs_initialize_chunk_size), data);

			if (error != 0)
				return (error);
		}
	}
	return (0);
}
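/*
 * Estimate initialization progress (bytes done and bytes estimated) by
 * walking the top-level vdev's metaslabs relative to
 * vdev_initialize_last_offset.
 */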
static void
vdev_initialize_calculate_progress(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	vd->vdev_initialize_bytes_est = 0;
	vd->vdev_initialize_bytes_done = 0;

	for (uint64_t i = 0; i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		mutex_enter(&msp->ms_lock);

		uint64_t ms_free = msp->ms_size -
		    metaslab_allocated_space(msp);

		if (vd->vdev_top->vdev_ops == &vdev_raidz_ops)
			ms_free /= vd->vdev_top->vdev_children;

		/*
		 * Convert the metaslab range to a physical range
		 * on our vdev. We use this to determine if we are
		 * in the middle of this metaslab range.
		 */
		range_seg64_t logical_rs, physical_rs;
		logical_rs.rs_start = msp->ms_start;
		logical_rs.rs_end = msp->ms_start + msp->ms_size;
		vdev_xlate(vd, &logical_rs, &physical_rs);

		if (vd->vdev_initialize_last_offset <= physical_rs.rs_start) {
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		} else if (vd->vdev_initialize_last_offset >
		    physical_rs.rs_end) {
			vd->vdev_initialize_bytes_done += ms_free;
			vd->vdev_initialize_bytes_est += ms_free;
			mutex_exit(&msp->ms_lock);
			continue;
		}

		/*
		 * If we get here, we're in the middle of initializing this
		 * metaslab. Load it and walk the free tree for more accurate
		 * progress estimation.
		 */
		VERIFY0(metaslab_load(msp));

		zfs_btree_index_t where;
		range_tree_t *rt = msp->ms_allocatable;
		for (range_seg_t *rs =
		    zfs_btree_first(&rt->rt_root, &where); rs;
		    rs = zfs_btree_next(&rt->rt_root, &where,
		    &where)) {
			logical_rs.rs_start = rs_get_start(rs, rt);
			logical_rs.rs_end = rs_get_end(rs, rt);
			vdev_xlate(vd, &logical_rs, &physical_rs);

			uint64_t size = physical_rs.rs_end -
			    physical_rs.rs_start;
			vd->vdev_initialize_bytes_est += size;
			if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done += size;
			} else if (vd->vdev_initialize_last_offset >
			    physical_rs.rs_start &&
			    vd->vdev_initialize_last_offset <
			    physical_rs.rs_end) {
				vd->vdev_initialize_bytes_done +=
				    vd->vdev_initialize_last_offset -
				    physical_rs.rs_start;
			}
		}
		mutex_exit(&msp->ms_lock);
	}
}
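/*
 * Load the persisted last-initialized offset from the leaf ZAP, if present,
 * and recompute the progress counters.
 */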
static int
vdev_initialize_load(vdev_t *vd)
{
	int err = 0;

	ASSERT(spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_READER) ||
	    spa_config_held(vd->vdev_spa, SCL_CONFIG, RW_WRITER));
	ASSERT(vd->vdev_leaf_zap != 0);

	if (vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE ||
	    vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED) {
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_LAST_OFFSET,
		    sizeof (vd->vdev_initialize_last_offset), 1,
		    &vd->vdev_initialize_last_offset);
		if (err == ENOENT) {
			vd->vdev_initialize_last_offset = 0;
			err = 0;
		}
	}

	vdev_initialize_calculate_progress(vd);
	return (err);
}
/*
 * Convert the logical range into a physical range and add it to our
 * range tree.
 */
static void
vdev_initialize_range_add(void *arg, uint64_t start, uint64_t size)
{
	vdev_t *vd = arg;
	range_seg64_t logical_rs, physical_rs;
	logical_rs.rs_start = start;
	logical_rs.rs_end = start + size;

	ASSERT(vd->vdev_ops->vdev_op_leaf);
	vdev_xlate(vd, &logical_rs, &physical_rs);

	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_start == physical_rs.rs_start);
	IMPLY(vd->vdev_top == vd,
	    logical_rs.rs_end == physical_rs.rs_end);

	/* Only add segments that we have not visited yet */
	if (physical_rs.rs_end <= vd->vdev_initialize_last_offset)
		return;

	/* Pick up where we left off mid-range. */
	if (vd->vdev_initialize_last_offset > physical_rs.rs_start) {
		zfs_dbgmsg("range write: vd %s changed (%llu, %llu) to "
		    "(%llu, %llu)", vd->vdev_path,
		    (u_longlong_t)physical_rs.rs_start,
		    (u_longlong_t)physical_rs.rs_end,
		    (u_longlong_t)vd->vdev_initialize_last_offset,
		    (u_longlong_t)physical_rs.rs_end);
		ASSERT3U(physical_rs.rs_end, >,
		    vd->vdev_initialize_last_offset);
		physical_rs.rs_start = vd->vdev_initialize_last_offset;
	}
	ASSERT3U(physical_rs.rs_end, >=, physical_rs.rs_start);

	/*
	 * With raidz, it's possible that the logical range does not live on
	 * this leaf vdev. We only add the physical range to this vdev's tree
	 * if it has a length greater than 0.
	 */
	if (physical_rs.rs_end > physical_rs.rs_start) {
		range_tree_add(vd->vdev_initialize_tree, physical_rs.rs_start,
		    physical_rs.rs_end - physical_rs.rs_start);
	} else {
		ASSERT3U(physical_rs.rs_end, ==, physical_rs.rs_start);
	}
}
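/*
 * Main body of the per-vdev initializing thread: for each metaslab of the
 * top-level vdev, translate its allocatable (free) space into physical
 * ranges on this leaf and overwrite them with the filler pattern.
 */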
static void
vdev_initialize_thread(void *arg)
{
	vdev_t *vd = arg;
	spa_t *spa = vd->vdev_spa;
	int error = 0;
	uint64_t ms_count = 0;

	ASSERT(vdev_is_concrete(vd));
	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

	vd->vdev_initialize_last_offset = 0;
	VERIFY0(vdev_initialize_load(vd));

	abd_t *deadbeef = vdev_initialize_block_alloc();

	vd->vdev_initialize_tree = range_tree_create(NULL, RANGE_SEG64, NULL,
	    0, 0);

	for (uint64_t i = 0; !vd->vdev_detached &&
	    i < vd->vdev_top->vdev_ms_count; i++) {
		metaslab_t *msp = vd->vdev_top->vdev_ms[i];
		boolean_t unload_when_done = B_FALSE;

		/*
		 * If we've expanded the top-level vdev or it's our
		 * first pass, calculate our progress.
		 */
		if (vd->vdev_top->vdev_ms_count != ms_count) {
			vdev_initialize_calculate_progress(vd);
			ms_count = vd->vdev_top->vdev_ms_count;
		}

		spa_config_exit(spa, SCL_CONFIG, FTAG);
		metaslab_disable(msp);
		mutex_enter(&msp->ms_lock);
		if (!msp->ms_loaded && !msp->ms_loading)
			unload_when_done = B_TRUE;
		VERIFY0(metaslab_load(msp));

		range_tree_walk(msp->ms_allocatable, vdev_initialize_range_add,
		    vd);
		mutex_exit(&msp->ms_lock);

		error = vdev_initialize_ranges(vd, deadbeef);
		metaslab_enable(msp, B_TRUE, unload_when_done);
		spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);

		range_tree_vacate(vd->vdev_initialize_tree, NULL, NULL);
		if (error != 0)
			break;
	}

	spa_config_exit(spa, SCL_CONFIG, FTAG);
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight > 0) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	mutex_exit(&vd->vdev_initialize_io_lock);

	range_tree_destroy(vd->vdev_initialize_tree);
	vdev_initialize_block_free(deadbeef);
	vd->vdev_initialize_tree = NULL;

	mutex_enter(&vd->vdev_initialize_lock);
	if (!vd->vdev_initialize_exit_wanted && vdev_writeable(vd)) {
		vdev_initialize_change_state(vd, VDEV_INITIALIZE_COMPLETE);
	}
	ASSERT(vd->vdev_initialize_thread != NULL ||
	    vd->vdev_initialize_inflight == 0);

	/*
	 * Drop the vdev_initialize_lock while we sync out the
	 * txg since it's possible that a device might be trying to
	 * come online and must check to see if it needs to restart an
	 * initialization. That thread will be holding the spa_config_lock
	 * which would prevent the txg_wait_synced from completing.
	 */
	mutex_exit(&vd->vdev_initialize_lock);
	txg_wait_synced(spa_get_dsl(spa), 0);
	mutex_enter(&vd->vdev_initialize_lock);

	vd->vdev_initialize_thread = NULL;
	cv_broadcast(&vd->vdev_initialize_cv);
	mutex_exit(&vd->vdev_initialize_lock);
}
/*
 * Initiates initialization of a device. Caller must hold
 * vdev_initialize_lock. Device must be a leaf and not already be
 * initializing.
 */
void
vdev_initialize(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));
	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	ASSERT(!vd->vdev_detached);
	ASSERT(!vd->vdev_initialize_exit_wanted);
	ASSERT(!vd->vdev_top->vdev_removing);

	vdev_initialize_change_state(vd, VDEV_INITIALIZE_ACTIVE);
	vd->vdev_initialize_thread = thread_create(NULL, 0,
	    vdev_initialize_thread, vd, 0, &p0, TS_RUN, maxclsyspri);
}
/*
 * Wait for the initialize thread to be terminated (cancelled or stopped).
 */
static void
vdev_initialize_stop_wait_impl(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));

	while (vd->vdev_initialize_thread != NULL)
		cv_wait(&vd->vdev_initialize_cv, &vd->vdev_initialize_lock);

	ASSERT3P(vd->vdev_initialize_thread, ==, NULL);
	vd->vdev_initialize_exit_wanted = B_FALSE;
}
/*
 * Wait for the vdev initialize threads which were listed to cleanly exit.
 */
void
vdev_initialize_stop_wait(spa_t *spa, list_t *vd_list)
{
	vdev_t *vd;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	while ((vd = list_remove_head(vd_list)) != NULL) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop_wait_impl(vd);
		mutex_exit(&vd->vdev_initialize_lock);
	}
}
/*
 * Stop initializing a device, with the resultant initializing state being
 * tgt_state. For blocking behavior pass NULL for vd_list. Otherwise, when
 * a list_t is provided the stopping vdev is inserted into the list. Callers
 * are then required to call vdev_initialize_stop_wait() to block for all the
 * initialization threads to exit. The caller must hold vdev_initialize_lock
 * and must not be writing to the spa config, as the initializing thread may
 * try to enter the config as a reader before exiting.
 */
void
vdev_initialize_stop(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_CONFIG|SCL_STATE, RW_WRITER));
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	ASSERT(vd->vdev_ops->vdev_op_leaf);
	ASSERT(vdev_is_concrete(vd));

	/*
	 * Allow cancel requests to proceed even if the initialize thread
	 * has stopped.
	 */
	if (vd->vdev_initialize_thread == NULL &&
	    tgt_state != VDEV_INITIALIZE_CANCELED) {
		return;
	}

	vdev_initialize_change_state(vd, tgt_state);
	vd->vdev_initialize_exit_wanted = B_TRUE;

	if (vd_list == NULL) {
		vdev_initialize_stop_wait_impl(vd);
	} else {
		ASSERT(MUTEX_HELD(&spa_namespace_lock));
		list_insert_tail(vd_list, vd);
	}
}
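/*
 * Recursively request every concrete leaf vdev in this subtree to stop
 * initializing, appending each one to vd_list for the caller to wait on.
 */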
static void
vdev_initialize_stop_all_impl(vdev_t *vd, vdev_initializing_state_t tgt_state,
    list_t *vd_list)
{
	if (vd->vdev_ops->vdev_op_leaf && vdev_is_concrete(vd)) {
		mutex_enter(&vd->vdev_initialize_lock);
		vdev_initialize_stop(vd, tgt_state, vd_list);
		mutex_exit(&vd->vdev_initialize_lock);
		return;
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_stop_all_impl(vd->vdev_child[i], tgt_state,
		    vd_list);
	}
}
/*
 * Convenience function to stop initializing of a vdev tree and set all
 * initialize thread pointers to NULL.
 */
void
vdev_initialize_stop_all(vdev_t *vd, vdev_initializing_state_t tgt_state)
{
	spa_t *spa = vd->vdev_spa;
	list_t vd_list;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	list_create(&vd_list, sizeof (vdev_t),
	    offsetof(vdev_t, vdev_initialize_node));

	vdev_initialize_stop_all_impl(vd, tgt_state, &vd_list);
	vdev_initialize_stop_wait(spa, &vd_list);

	if (vd->vdev_spa->spa_sync_on) {
		/* Make sure that our state has been synced to disk */
		txg_wait_synced(spa_get_dsl(vd->vdev_spa), 0);
	}

	list_destroy(&vd_list);
}
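/*
 * Restore the initializing state recorded in each leaf ZAP (e.g. at pool
 * import or when a device comes online), resuming any initialization that
 * was still marked active.
 */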
void
vdev_initialize_restart(vdev_t *vd)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));
	ASSERT(!spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));

	if (vd->vdev_leaf_zap != 0) {
		mutex_enter(&vd->vdev_initialize_lock);
		uint64_t initialize_state = VDEV_INITIALIZE_NONE;
		int err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_STATE,
		    sizeof (initialize_state), 1, &initialize_state);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_state = initialize_state;

		uint64_t timestamp = 0;
		err = zap_lookup(vd->vdev_spa->spa_meta_objset,
		    vd->vdev_leaf_zap, VDEV_LEAF_ZAP_INITIALIZE_ACTION_TIME,
		    sizeof (timestamp), 1, &timestamp);
		ASSERT(err == 0 || err == ENOENT);
		vd->vdev_initialize_action_time = (time_t)timestamp;

		if (vd->vdev_initialize_state == VDEV_INITIALIZE_SUSPENDED ||
		    vd->vdev_offline) {
			/* load progress for reporting, but don't resume */
			VERIFY0(vdev_initialize_load(vd));
		} else if (vd->vdev_initialize_state ==
		    VDEV_INITIALIZE_ACTIVE && vdev_writeable(vd) &&
		    !vd->vdev_top->vdev_removing &&
		    vd->vdev_initialize_thread == NULL) {
			vdev_initialize(vd);
		}

		mutex_exit(&vd->vdev_initialize_lock);
	}

	for (uint64_t i = 0; i < vd->vdev_children; i++) {
		vdev_initialize_restart(vd->vdev_child[i]);
	}
}
EXPORT_SYMBOL(vdev_initialize);
EXPORT_SYMBOL(vdev_initialize_stop);
EXPORT_SYMBOL(vdev_initialize_stop_all);
EXPORT_SYMBOL(vdev_initialize_stop_wait);
EXPORT_SYMBOL(vdev_initialize_restart);
ZFS_MODULE_PARAM(zfs, zfs_, initialize_value, ULONG, ZMOD_RW,
	"Value written during zpool initialize");

ZFS_MODULE_PARAM(zfs, zfs_, initialize_chunk_size, ULONG, ZMOD_RW,
	"Size in bytes of writes by zpool initialize");