 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 */
#include <sys/dataset_kstats.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol_impl.h>

#include <linux/blkdev_compat.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blk-mq.h>
static void zvol_request_impl(zvol_state_t *zv, struct bio *bio,
    struct request *rq, boolean_t force_sync);
static unsigned int zvol_major = ZVOL_MAJOR;
static unsigned int zvol_request_sync = 0;
static unsigned int zvol_prefetch_bytes = (128 * 1024);
static unsigned long zvol_max_discard_blocks = 16384;
/*
 * Switch taskq at multiple of 512 MB offset. This can be set to a lower value
 * to utilize more threads for small files but may affect prefetch hits.
 */
#define	ZVOL_TASKQ_OFFSET_SHIFT	29
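
/*
 * Illustrative example (not part of the original source): with a shift of
 * 29 bits, each 512 MB span of the volume maps to one hash input, so
 * nearby offsets share a taskq while distant ones usually do not:
 *
 *	(100ULL << 20) >> ZVOL_TASKQ_OFFSET_SHIFT == 0	(100 MB)
 *	(600ULL << 20) >> ZVOL_TASKQ_OFFSET_SHIFT == 1	(600 MB)
 */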
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
static unsigned int zvol_open_timeout_ms = 1000;
#endif
static unsigned int zvol_threads = 0;

static unsigned int zvol_blk_mq_threads = 0;
static unsigned int zvol_blk_mq_actual_threads;
static boolean_t zvol_use_blk_mq = B_FALSE;
/*
 * The maximum number of volblocksize blocks to process per thread. Typically,
 * write heavy workloads perform better with higher values here, and read
 * heavy workloads perform better with lower values, but that's not a hard
 * and fast rule. It's basically a knob to tune between "less overhead with
 * less parallelism" and "more overhead, but more parallelism".
 *
 * '8' was chosen as a reasonable, balanced, default based off of sequential
 * read and write tests to a zvol in an NVMe pool (with 16 CPUs).
 */
static unsigned int zvol_blk_mq_blocks_per_thread = 8;
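
/*
 * Worked example (illustrative): with volblocksize=128k and the default of
 * 8 blocks per thread, each blk-mq thread handles requests of up to
 * 8 * 128k = 1 MB before additional threads are brought in.
 */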
static unsigned int zvol_num_taskqs = 0;
#ifndef BLKDEV_DEFAULT_RQ
/* BLKDEV_MAX_RQ was renamed to BLKDEV_DEFAULT_RQ in the 5.16 kernel */
#define	BLKDEV_DEFAULT_RQ	BLKDEV_MAX_RQ
#endif
/*
 * Finalize our BIO or request.
 */
#ifdef HAVE_BLK_MQ
#define	END_IO(zv, bio, rq, error)  do { \
	if (bio) { \
		BIO_END_IO(bio, error); \
	} else { \
		blk_mq_end_request(rq, errno_to_bi_status(error)); \
	} \
} while (0)
#else
#define	END_IO(zv, bio, rq, error)	BIO_END_IO(bio, error)
#endif
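
/*
 * Usage note (illustrative): callers below hand END_IO() an errno-style
 * value, e.g. END_IO(zv, bio, rq, -error); on the blk-mq path that value
 * is converted to a blk_status_t before blk_mq_end_request() is called.
 */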
static unsigned int zvol_blk_mq_queue_depth = BLKDEV_DEFAULT_RQ;
static unsigned int zvol_actual_blk_mq_queue_depth;
struct zvol_state_os {
	struct gendisk *zvo_disk;		/* generic disk */
	struct request_queue *zvo_queue;	/* request queue */
	dev_t zvo_dev;				/* device id */

	struct blk_mq_tag_set tag_set;

	/* Set from the global 'zvol_use_blk_mq' at zvol load */
	boolean_t use_blk_mq;
};
typedef struct zv_taskq {
	uint_t tqs_cnt;
	taskq_t **tqs_taskq;
} zv_taskq_t;

static zv_taskq_t zvol_taskqs;
static struct ida zvol_ida;
typedef struct zv_request_stack {
	zvol_state_t	*zv;
	struct bio	*bio;
	struct request	*rq;
} zv_request_t;

typedef struct zv_work {
	struct request	*rq;
	struct work_struct work;
} zv_work_t;

typedef struct zv_request_task {
	zv_request_t zvr;
	taskq_ent_t	ent;
} zv_request_task_t;
static zv_request_task_t *
zv_request_task_create(zv_request_t zvr)
{
	zv_request_task_t *task;

	task = kmem_alloc(sizeof (zv_request_task_t), KM_SLEEP);
	taskq_init_ent(&task->ent);
	task->zvr = zvr;
	return (task);
}

static void
zv_request_task_free(zv_request_task_t *task)
{
	kmem_free(task, sizeof (*task));
}
#ifdef HAVE_BLK_MQ
/*
 * This is called when a new block multiqueue request comes in. A request
 * contains one or more BIOs.
 */
static blk_status_t zvol_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
    const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	zvol_state_t *zv = rq->q->queuedata;

	/* Tell the kernel that we are starting to process this request */
	blk_mq_start_request(rq);

	if (blk_rq_is_passthrough(rq)) {
		/* Skip non-filesystem requests */
		blk_mq_end_request(rq, BLK_STS_IOERR);
		return (BLK_STS_IOERR);
	}

	zvol_request_impl(zv, NULL, rq, 0);

	/* Acknowledge to the kernel that we got this request */
	return (BLK_STS_OK);
}
static struct blk_mq_ops zvol_blk_mq_queue_ops = {
	.queue_rq = zvol_mq_queue_rq,
};
/* Initialize our blk-mq struct */
static int zvol_blk_mq_alloc_tag_set(zvol_state_t *zv)
{
	struct zvol_state_os *zso = zv->zv_zso;

	memset(&zso->tag_set, 0, sizeof (zso->tag_set));

	/* Initialize tag set. */
	zso->tag_set.ops = &zvol_blk_mq_queue_ops;
	zso->tag_set.nr_hw_queues = zvol_blk_mq_actual_threads;
	zso->tag_set.queue_depth = zvol_actual_blk_mq_queue_depth;
	zso->tag_set.numa_node = NUMA_NO_NODE;
	zso->tag_set.cmd_size = 0;

	/*
	 * We need BLK_MQ_F_BLOCKING here since we do blocking calls in
	 * zvol_request_impl()
	 */
	zso->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING;
	zso->tag_set.driver_data = zv;

	return (blk_mq_alloc_tag_set(&zso->tag_set));
}
#endif /* HAVE_BLK_MQ */
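
/*
 * Illustrative note: with the defaults on a 16-CPU machine this produces a
 * tag set with nr_hw_queues == 16 (one per CPU, from
 * zvol_blk_mq_actual_threads) and queue_depth == BLKDEV_DEFAULT_RQ, which
 * the kernel uses to size the hardware queues fed to zvol_mq_queue_rq().
 */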
/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_os_is_zvol(const char *path)
{
	dev_t dev = 0;

	if (vdev_lookup_bdev(path, &dev) != 0)
		return (B_FALSE);

	if (MAJOR(dev) == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}
static void
zvol_write(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	struct request *rq = zvr->rq;
	int error = 0;
	zfs_uio_t uio;
	zvol_state_t *zv = zvr->zv;
	struct request_queue *q;
	struct gendisk *disk;
	unsigned long start_time = 0;
	boolean_t acct = B_FALSE;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	q = zv->zv_zso->zvo_queue;
	disk = zv->zv_zso->zvo_disk;

	/* bios marked as FLUSH need to flush before the write */
	if (io_is_flush(bio, rq))
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/* Some requests are just for flush and nothing else. */
	if (io_size(bio, rq) == 0) {
		rw_exit(&zv->zv_suspend_lock);
		END_IO(zv, bio, rq, 0);
		return;
	}

	zfs_uio_bvec_init(&uio, bio, rq);

	ssize_t start_resid = uio.uio_resid;

	/*
	 * With use_blk_mq, accounting is done by blk_mq_start_request()
	 * and blk_mq_end_request(), so we can skip it here.
	 */
	if (bio) {
		acct = blk_queue_io_stat(q);
		if (acct) {
			start_time = blk_generic_start_io_acct(q, disk, WRITE,
			    bio);
		}
	}

	boolean_t sync =
	    io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_WRITER);

	uint64_t volsize = zv->zv_volsize;
	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio.uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write_by_dnode(tx, zv->zv_dn, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dnode(zv->zv_dn, &uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}
	zfs_rangelock_exit(lr);

	int64_t nwritten = start_resid - uio.uio_resid;
	dataset_kstats_update_write_kstats(&zv->zv_kstat, nwritten);
	task_io_account_write(nwritten);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	rw_exit(&zv->zv_suspend_lock);

	if (bio && acct) {
		blk_generic_end_io_acct(q, disk, WRITE, bio, start_time);
	}

	END_IO(zv, bio, rq, -error);
}
static void
zvol_write_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_write(&task->zvr);
	zv_request_task_free(task);
}
static void
zvol_discard(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	struct request *rq = zvr->rq;
	zvol_state_t *zv = zvr->zv;
	uint64_t start = io_offset(bio, rq);
	uint64_t size = io_size(bio, rq);
	uint64_t end = start + size;
	boolean_t sync;
	int error = 0;
	dmu_tx_t *tx;
	struct request_queue *q = zv->zv_zso->zvo_queue;
	struct gendisk *disk = zv->zv_zso->zvo_disk;
	unsigned long start_time = 0;
	boolean_t acct = B_FALSE;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	ASSERT3P(zv->zv_zilog, !=, NULL);

	if (bio) {
		acct = blk_queue_io_stat(q);
		if (acct) {
			start_time = blk_generic_start_io_acct(q, disk, WRITE,
			    bio);
		}
	}

	sync = io_is_fua(bio, rq) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;

	if (end > zv->zv_volsize) {
		error = SET_ERROR(EIO);
		goto unlock;
	}

	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required. This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
	if (!io_is_secure_erase(bio, rq)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		goto unlock;

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    start, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}
	zfs_rangelock_exit(lr);

	if (error == 0 && sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

unlock:
	rw_exit(&zv->zv_suspend_lock);

	if (bio && acct) {
		blk_generic_end_io_acct(q, disk, WRITE, bio,
		    start_time);
	}

	END_IO(zv, bio, rq, -error);
}

static void
zvol_discard_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_discard(&task->zvr);
	zv_request_task_free(task);
}
static void
zvol_read(zv_request_t *zvr)
{
	struct bio *bio = zvr->bio;
	struct request *rq = zvr->rq;
	int error = 0;
	zfs_uio_t uio;
	boolean_t acct = B_FALSE;
	zvol_state_t *zv = zvr->zv;
	struct request_queue *q;
	struct gendisk *disk;
	unsigned long start_time = 0;

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);

	zfs_uio_bvec_init(&uio, bio, rq);

	q = zv->zv_zso->zvo_queue;
	disk = zv->zv_zso->zvo_disk;

	ssize_t start_resid = uio.uio_resid;

	/*
	 * When blk-mq is being used, accounting is done by
	 * blk_mq_start_request() and blk_mq_end_request().
	 */
	if (bio) {
		acct = blk_queue_io_stat(q);
		if (acct) {
			start_time = blk_generic_start_io_acct(q, disk, READ,
			    bio);
		}
	}

	zfs_locked_range_t *lr = zfs_rangelock_enter(&zv->zv_rangelock,
	    uio.uio_loffset, uio.uio_resid, RL_READER);

	uint64_t volsize = zv->zv_volsize;

	while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
		uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio.uio_loffset)
			bytes = volsize - uio.uio_loffset;

		error = dmu_read_uio_dnode(zv->zv_dn, &uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}
	zfs_rangelock_exit(lr);

	int64_t nread = start_resid - uio.uio_resid;
	dataset_kstats_update_read_kstats(&zv->zv_kstat, nread);
	task_io_account_read(nread);

	rw_exit(&zv->zv_suspend_lock);

	if (bio && acct) {
		blk_generic_end_io_acct(q, disk, READ, bio, start_time);
	}

	END_IO(zv, bio, rq, -error);
}

static void
zvol_read_task(void *arg)
{
	zv_request_task_t *task = arg;
	zvol_read(&task->zvr);
	zv_request_task_free(task);
}
/*
 * Process a BIO or request.
 *
 * Either 'bio' or 'rq' should be set depending on if we are processing a
 * bio or a request (both should not be set).
 *
 * force_sync:	Set to 0 to defer processing to a background taskq
 *		Set to 1 to process data synchronously
 */
static void
zvol_request_impl(zvol_state_t *zv, struct bio *bio, struct request *rq,
    boolean_t force_sync)
{
	fstrans_cookie_t cookie = spl_fstrans_mark();
	uint64_t offset = io_offset(bio, rq);
	uint64_t size = io_size(bio, rq);
	int rw = io_data_dir(bio, rq);

	if (zvol_request_sync || zv->zv_threading == B_FALSE)
		force_sync = 1;

	zv_request_t zvr = {
		.zv = zv,
		.bio = bio,
		.rq = rq,
	};

	if (io_has_data(bio, rq) && offset + size > zv->zv_volsize) {
		printk(KERN_INFO "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_zso->zvo_disk->disk_name,
		    (long long unsigned)offset,
		    (long unsigned)size);

		END_IO(zv, bio, rq, -SET_ERROR(EIO));
		goto out;
	}

	zv_request_task_t *task;
	zv_taskq_t *ztqs = &zvol_taskqs;
	uint_t blk_mq_hw_queue = 0;
	uint_t tq_idx;
	uint_t taskq_hash;
#ifdef HAVE_BLK_MQ
	if (rq)
		blk_mq_hw_queue = rq->mq_hctx->queue_num;
#endif
	taskq_hash = cityhash4((uintptr_t)zv, offset >> ZVOL_TASKQ_OFFSET_SHIFT,
	    blk_mq_hw_queue, 0);
	tq_idx = taskq_hash % ztqs->tqs_cnt;
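
	/*
	 * Illustrative example: two writes to the same zvol at offsets 0 and
	 * 1 GB hash different inputs (0 >> 29 == 0, 1 GB >> 29 == 2), so they
	 * will usually land on different taskqs, while offsets within the
	 * same 512 MB span share a taskq and keep their locality.
	 */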
	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			END_IO(zv, bio, rq, -SET_ERROR(EROFS));
			goto out;
		}

		/*
		 * Prevents the zvol from being suspended, or the ZIL being
		 * concurrently opened. Will be released after the i/o
		 * has completed.
		 */
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/*
		 * Open a ZIL if this is the first time we have written to this
		 * zvol. We protect zv->zv_zilog with zv_suspend_lock rather
		 * than zv_state_lock so that we don't need to acquire an
		 * additional lock in this path.
		 */
		if (zv->zv_zilog == NULL) {
			rw_exit(&zv->zv_suspend_lock);
			rw_enter(&zv->zv_suspend_lock, RW_WRITER);
			if (zv->zv_zilog == NULL) {
				zv->zv_zilog = zil_open(zv->zv_objset,
				    zvol_get_data, &zv->zv_kstat.dk_zil_sums);
				zv->zv_flags |= ZVOL_WRITTEN_TO;
				/* replay / destroy done in zvol_create_minor */
				VERIFY0((zv->zv_zilog->zl_header->zh_flags &
				    ZIL_REPLAY_NEEDED));
			}
			rw_downgrade(&zv->zv_suspend_lock);
		}

		/*
		 * We don't want this thread to be blocked waiting for i/o to
		 * complete, so we instead wait from a taskq callback. The
		 * i/o may be a ZIL write (via zil_commit()), or a read of an
		 * indirect block, or a read of a data block (if this is a
		 * partial-block write). We will indicate that the i/o is
		 * complete by calling END_IO() from the taskq callback.
		 *
		 * This design allows the calling thread to continue and
		 * initiate more concurrent operations by calling
		 * zvol_request() again. There are typically only a small
		 * number of threads available to call zvol_request() (e.g.
		 * one per iSCSI target), so keeping the latency of
		 * zvol_request() low is important for performance.
		 *
		 * The zvol_request_sync module parameter allows this
		 * behavior to be altered, for performance evaluation
		 * purposes. If the callback blocks, setting
		 * zvol_request_sync=1 will result in much worse performance.
		 *
		 * We can have up to zvol_threads concurrent i/o's being
		 * processed for all zvols on the system. This is typically
		 * a vast improvement over the zvol_request_sync=1 behavior
		 * of one i/o at a time per zvol. However, an even better
		 * design would be for zvol_request() to initiate the zio
		 * directly, and then be notified by the zio_done callback,
		 * which would call END_IO(). Unfortunately, the DMU/ZIL
		 * interfaces lack this functionality (they block waiting for
		 * the i/o to complete).
		 */
		if (io_is_discard(bio, rq) || io_is_secure_erase(bio, rq)) {
			if (force_sync) {
				zvol_discard(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
				    zvol_discard_task, task, 0, &task->ent);
			}
		} else {
			if (force_sync) {
				zvol_write(&zvr);
			} else {
				task = zv_request_task_create(zvr);
				taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
				    zvol_write_task, task, 0, &task->ent);
			}
		}
	} else {
		/*
		 * The SCST driver, and possibly others, may issue READ I/Os
		 * with a length of zero bytes. These empty I/Os contain no
		 * data and require no additional handling.
		 */
		if (size == 0) {
			END_IO(zv, bio, rq, 0);
			goto out;
		}

		rw_enter(&zv->zv_suspend_lock, RW_READER);

		/* See comment in WRITE case above. */
		if (force_sync) {
			zvol_read(&zvr);
		} else {
			task = zv_request_task_create(zvr);
			taskq_dispatch_ent(ztqs->tqs_taskq[tq_idx],
			    zvol_read_task, task, 0, &task->ent);
		}
	}

out:
	spl_fstrans_unmark(cookie);
}
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#ifdef HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID
static void
zvol_submit_bio(struct bio *bio)
#else
static blk_qc_t
zvol_submit_bio(struct bio *bio)
#endif
#else
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
#endif
{
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
#endif
	zvol_state_t *zv = q->queuedata;

	zvol_request_impl(zv, bio, NULL, 0);
#if defined(HAVE_MAKE_REQUEST_FN_RET_QC) || \
    defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
    !defined(HAVE_BDEV_SUBMIT_BIO_RETURNS_VOID)
	return (BLK_QC_T_NONE);
#endif
}
static int
#ifdef HAVE_BLK_MODE_T
zvol_open(struct gendisk *disk, blk_mode_t flag)
#else
zvol_open(struct block_device *bdev, fmode_t flag)
#endif
{
	zvol_state_t *zv;
	int error = 0;
	boolean_t drop_suspend = B_FALSE;
#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
	hrtime_t timeout = MSEC2NSEC(zvol_open_timeout_ms);
	hrtime_t start = gethrtime();

retry:
#endif
	rw_enter(&zvol_state_lock, RW_READER);
	/*
	 * Obtain a copy of private_data under the zvol_state_lock to make
	 * sure that either the result of zvol free code path setting
	 * disk->private_data to NULL is observed, or zvol_os_free()
	 * is not called on this zv because of the positive zv_open_count.
	 */
#ifdef HAVE_BLK_MODE_T
	zv = disk->private_data;
#else
	zv = bdev->bd_disk->private_data;
#endif
	if (zv == NULL) {
		rw_exit(&zvol_state_lock);
		return (SET_ERROR(-ENXIO));
	}

	mutex_enter(&zv->zv_state_lock);
	/*
	 * Make sure zvol is not suspended during first open
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 0) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 0) {
				rw_exit(&zv->zv_suspend_lock);
			} else {
				drop_suspend = B_TRUE;
			}
		} else {
			drop_suspend = B_TRUE;
		}
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	if (zv->zv_open_count == 0) {
		boolean_t drop_namespace = B_FALSE;

		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));

		/*
		 * In all other call paths the spa_namespace_lock is taken
		 * before the bdev->bd_mutex lock. However, on open(2)
		 * the __blkdev_get() function calls fops->open() with the
		 * bdev->bd_mutex lock held. This can result in a deadlock
		 * when zvols from one pool are used as vdevs in another.
		 *
		 * To prevent a lock inversion deadlock we preemptively
		 * take the spa_namespace_lock. Normally the lock will not
		 * be contended and this is safe because spa_open_common()
		 * handles the case where the caller already holds the
		 * spa_namespace_lock.
		 *
		 * When the lock cannot be acquired after multiple retries
		 * this must be the vdev on zvol deadlock case and we have
		 * no choice but to return an error. For 5.12 and older
		 * kernels returning -ERESTARTSYS will result in the
		 * bdev->bd_mutex being dropped, then reacquired, and
		 * fops->open() being called again. This process can be
		 * repeated safely until both locks are acquired. For 5.13
		 * and newer the -ERESTARTSYS retry logic was removed from
		 * the kernel so the only option is to return the error for
		 * the caller to handle it.
		 */
		if (!mutex_owned(&spa_namespace_lock)) {
			if (!mutex_tryenter(&spa_namespace_lock)) {
				mutex_exit(&zv->zv_state_lock);
				rw_exit(&zv->zv_suspend_lock);
#ifdef HAVE_BLKDEV_GET_ERESTARTSYS
				return (SET_ERROR(-ERESTARTSYS));
#else
				if ((gethrtime() - start) > timeout)
					return (SET_ERROR(-ERESTARTSYS));

				schedule_timeout(MSEC_TO_TICK(10));
				goto retry;
#endif
			} else {
				drop_namespace = B_TRUE;
			}
		}

		error = -zvol_first_open(zv, !(blk_mode_is_open_write(flag)));

		if (drop_namespace)
			mutex_exit(&spa_namespace_lock);
	}

	if (error == 0) {
		if ((blk_mode_is_open_write(flag)) &&
		    (zv->zv_flags & ZVOL_RDONLY)) {
			if (zv->zv_open_count == 0)
				zvol_last_close(zv);

			error = SET_ERROR(-EROFS);
		} else {
			zv->zv_open_count++;
		}
	}

	mutex_exit(&zv->zv_state_lock);
	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);

	if (error == 0) {
#ifdef HAVE_BLK_MODE_T
		disk_check_media_change(disk);
#else
		zfs_check_media_change(bdev);
#endif
	}

	return (error);
}
static void
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG
zvol_release(struct gendisk *disk)
#else
zvol_release(struct gendisk *disk, fmode_t unused)
#endif
{
#if !defined(HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_1ARG)
	(void) unused;
#endif
	zvol_state_t *zv;
	boolean_t drop_suspend = B_TRUE;

	rw_enter(&zvol_state_lock, RW_READER);
	zv = disk->private_data;

	mutex_enter(&zv->zv_state_lock);
	ASSERT3U(zv->zv_open_count, >, 0);
	/*
	 * make sure zvol is not suspended during last close
	 * (hold zv_suspend_lock) and respect proper lock acquisition
	 * ordering - zv_suspend_lock before zv_state_lock
	 */
	if (zv->zv_open_count == 1) {
		if (!rw_tryenter(&zv->zv_suspend_lock, RW_READER)) {
			mutex_exit(&zv->zv_state_lock);
			rw_enter(&zv->zv_suspend_lock, RW_READER);
			mutex_enter(&zv->zv_state_lock);
			/* check to see if zv_suspend_lock is needed */
			if (zv->zv_open_count != 1) {
				rw_exit(&zv->zv_suspend_lock);
				drop_suspend = B_FALSE;
			}
		}
	} else {
		drop_suspend = B_FALSE;
	}
	rw_exit(&zvol_state_lock);

	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	zv->zv_open_count--;
	if (zv->zv_open_count == 0) {
		ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
		zvol_last_close(zv);
	}

	mutex_exit(&zv->zv_state_lock);

	if (drop_suspend)
		rw_exit(&zv->zv_suspend_lock);
}
static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	ASSERT3U(zv->zv_open_count, >, 0);

	switch (cmd) {
	case BLKFLSBUF:
#ifdef HAVE_FSYNC_BDEV
		fsync_bdev(bdev);
#elif defined(HAVE_SYNC_BLOCKDEV)
		sync_blockdev(bdev);
#else
#error "Neither fsync_bdev() nor sync_blockdev() found"
#endif
		invalidate_bdev(bdev);
		rw_enter(&zv->zv_suspend_lock, RW_READER);

		if (!(zv->zv_flags & ZVOL_RDONLY))
			txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);

		rw_exit(&zv->zv_suspend_lock);
		break;

	case BLKZNAME:
		mutex_enter(&zv->zv_state_lock);
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		mutex_exit(&zv->zv_state_lock);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif
static unsigned int
zvol_check_events(struct gendisk *disk, unsigned int clearing)
{
	unsigned int mask = 0;

	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		mask = zv->zv_changed ? DISK_EVENT_MEDIA_CHANGE : 0;
		zv->zv_changed = 0;
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (mask);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
	rw_enter(&zvol_state_lock, RW_READER);

	zvol_state_t *zv = disk->private_data;
	if (zv != NULL) {
		mutex_enter(&zv->zv_state_lock);
		set_capacity(zv->zv_zso->zvo_disk,
		    zv->zv_volsize >> SECTOR_BITS);
		mutex_exit(&zv->zv_state_lock);
	}

	rw_exit(&zvol_state_lock);

	return (0);
}
int
zvol_os_update_volsize(zvol_state_t *zv, uint64_t volsize)
{
	struct gendisk *disk = zv->zv_zso->zvo_disk;

#if defined(HAVE_REVALIDATE_DISK_SIZE)
	revalidate_disk_size(disk, zvol_revalidate_disk(disk) == 0);
#elif defined(HAVE_REVALIDATE_DISK)
	revalidate_disk(disk);
#else
	zvol_revalidate_disk(disk);
#endif
	return (0);
}
void
zvol_os_clear_private(zvol_state_t *zv)
{
	/*
	 * Cleared while holding zvol_state_lock as a writer
	 * which will prevent zvol_open() from opening it.
	 */
	zv->zv_zso->zvo_disk->private_data = NULL;
}
/*
 * Provide a simple virtual geometry for legacy compatibility. For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors;

	ASSERT3U(zv->zv_open_count, >, 0);

	sectors = get_capacity(zv->zv_zso->zvo_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}
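
/*
 * Worked example (illustrative): a 1 GiB zvol reports 2097152 512-byte
 * sectors; with heads=16 and sectors=63 the reported cylinder count is
 * 2097152 / (16 * 63) = 2080.
 */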
/*
 * Why have two separate block_device_operations structs?
 *
 * Normally we'd just have one, and assign 'submit_bio' as needed. However,
 * it's possible the user's kernel is built with CONSTIFY_PLUGIN, meaning we
 * can't just change submit_bio dynamically at runtime. So just create two
 * separate structs to get around this.
 */
static const struct block_device_operations zvol_ops_blk_mq = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.check_events		= zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	.revalidate_disk	= zvol_revalidate_disk,
#endif
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};

static const struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.check_events		= zvol_check_events,
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
	.revalidate_disk	= zvol_revalidate_disk,
#endif
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
#ifdef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
	.submit_bio		= zvol_submit_bio,
#endif
};
static int
zvol_alloc_non_blk_mq(struct zvol_state_os *zso)
{
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS)
#if defined(HAVE_BLK_ALLOC_DISK)
	zso->zvo_disk = blk_alloc_disk(NUMA_NO_NODE);
	if (zso->zvo_disk == NULL)
		return (1);

	zso->zvo_disk->minors = ZVOL_MINORS;
	zso->zvo_queue = zso->zvo_disk->queue;
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
	struct gendisk *disk = blk_alloc_disk(NULL, NUMA_NO_NODE);
	if (IS_ERR(disk)) {
		zso->zvo_disk = NULL;
		return (1);
	}

	zso->zvo_disk = disk;
	zso->zvo_disk->minors = ZVOL_MINORS;
	zso->zvo_queue = zso->zvo_disk->queue;
#else
	zso->zvo_queue = blk_alloc_queue(NUMA_NO_NODE);
	if (zso->zvo_queue == NULL)
		return (1);

	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL) {
		blk_cleanup_queue(zso->zvo_queue);
		return (1);
	}

	zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_BLK_ALLOC_DISK */
#else
	zso->zvo_queue = blk_generic_alloc_queue(zvol_request, NUMA_NO_NODE);
	if (zso->zvo_queue == NULL)
		return (1);

	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL) {
		blk_cleanup_queue(zso->zvo_queue);
		return (1);
	}

	zso->zvo_disk->queue = zso->zvo_queue;
#endif /* HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */

	return (0);
}
static int
zvol_alloc_blk_mq(zvol_state_t *zv)
{
	struct zvol_state_os *zso = zv->zv_zso;

	/* Allocate our blk-mq tag_set */
	if (zvol_blk_mq_alloc_tag_set(zv) != 0)
		return (1);

#if defined(HAVE_BLK_ALLOC_DISK)
	zso->zvo_disk = blk_mq_alloc_disk(&zso->tag_set, zv);
	if (zso->zvo_disk == NULL) {
		blk_mq_free_tag_set(&zso->tag_set);
		return (1);
	}
	zso->zvo_queue = zso->zvo_disk->queue;
	zso->zvo_disk->minors = ZVOL_MINORS;
#elif defined(HAVE_BLK_ALLOC_DISK_2ARG)
	struct gendisk *disk = blk_mq_alloc_disk(&zso->tag_set, NULL, zv);
	if (IS_ERR(disk)) {
		zso->zvo_disk = NULL;
		blk_mq_free_tag_set(&zso->tag_set);
		return (1);
	}

	zso->zvo_disk = disk;
	zso->zvo_queue = zso->zvo_disk->queue;
	zso->zvo_disk->minors = ZVOL_MINORS;
#else
	zso->zvo_disk = alloc_disk(ZVOL_MINORS);
	if (zso->zvo_disk == NULL) {
		blk_cleanup_queue(zso->zvo_queue);
		blk_mq_free_tag_set(&zso->tag_set);
		return (1);
	}

	/* Allocate queue */
	zso->zvo_queue = blk_mq_init_queue(&zso->tag_set);
	if (IS_ERR(zso->zvo_queue)) {
		blk_mq_free_tag_set(&zso->tag_set);
		return (1);
	}

	/* Our queue is now created, assign it to our disk */
	zso->zvo_disk->queue = zso->zvo_queue;
#endif

	return (0);
}
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	struct zvol_state_os *zso;
	uint64_t volmode;
	int ret;

	if (dsl_prop_get_integer(name, "volmode", &volmode, NULL) != 0)
		return (NULL);

	if (volmode == ZFS_VOLMODE_DEFAULT)
		volmode = zvol_volmode;

	if (volmode == ZFS_VOLMODE_NONE)
		return (NULL);

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
	zso = kmem_zalloc(sizeof (struct zvol_state_os), KM_SLEEP);
	zv->zv_zso = zso;
	zv->zv_volmode = volmode;

	list_link_init(&zv->zv_next);
	mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);

	zv->zv_zso->use_blk_mq = zvol_use_blk_mq;

	/*
	 * The block layer has 3 interfaces for getting BIOs:
	 *
	 * 1. blk-mq request queues (new)
	 * 2. submit_bio() (oldest)
	 * 3. regular request queues (old).
	 *
	 * Each of those interfaces has two permutations:
	 *
	 * a) We have blk_alloc_disk()/blk_mq_alloc_disk(), which allocates
	 *    both the disk and its queue (5.14 kernel or newer)
	 *
	 * b) We don't have blk_*alloc_disk(), and have to allocate the
	 *    disk and the queue separately. (5.13 kernel or older)
	 */
	if (zv->zv_zso->use_blk_mq) {
		ret = zvol_alloc_blk_mq(zv);
		zso->zvo_disk->fops = &zvol_ops_blk_mq;
	} else {
		ret = zvol_alloc_non_blk_mq(zso);
		zso->zvo_disk->fops = &zvol_ops;
	}
	if (ret != 0)
		goto out_kmem;

	blk_queue_set_write_cache(zso->zvo_queue, B_TRUE, B_TRUE);

	/* Limit read-ahead to a single page to prevent over-prefetching. */
	blk_queue_set_read_ahead(zso->zvo_queue, 1);

	if (!zv->zv_zso->use_blk_mq) {
		/* Disable write merging in favor of the ZIO pipeline. */
		blk_queue_flag_set(QUEUE_FLAG_NOMERGES, zso->zvo_queue);
	}

	/* Enable /proc/diskstats */
	blk_queue_flag_set(QUEUE_FLAG_IO_STAT, zso->zvo_queue);

	zso->zvo_queue->queuedata = zv;
	zso->zvo_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, sizeof (zv->zv_name));

	zfs_rangelock_init(&zv->zv_rangelock, NULL, NULL);
	rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);

	zso->zvo_disk->major = zvol_major;
	zso->zvo_disk->events = DISK_EVENT_MEDIA_CHANGE;

	/*
	 * Setting ZFS_VOLMODE_DEV disables partitioning on ZVOL devices.
	 * This is accomplished by limiting the number of minors for the
	 * device to one and explicitly disabling partition scanning.
	 */
	if (volmode == ZFS_VOLMODE_DEV) {
		zso->zvo_disk->minors = 1;
		zso->zvo_disk->flags &= ~ZFS_GENHD_FL_EXT_DEVT;
		zso->zvo_disk->flags |= ZFS_GENHD_FL_NO_PART;
	}

	zso->zvo_disk->first_minor = (dev & MINORMASK);
	zso->zvo_disk->private_data = zv;
	snprintf(zso->zvo_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_kmem:
	kmem_free(zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
	return (NULL);
}
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 * At this time, the structure is not opened by anyone, is taken off
 * the zvol_state_list, and has its private data set to NULL.
 * The zvol_state_lock is dropped.
 *
 * This function may take many milliseconds to complete (e.g. we've seen
 * it take over 256ms), due to the calls to "blk_cleanup_queue" and
 * "del_gendisk". Thus, consumers need to be careful to account for this
 * latency when calling this function.
 */
void
zvol_os_free(zvol_state_t *zv)
{
	ASSERT(!RW_LOCK_HELD(&zv->zv_suspend_lock));
	ASSERT(!MUTEX_HELD(&zv->zv_state_lock));
	ASSERT0(zv->zv_open_count);
	ASSERT3P(zv->zv_zso->zvo_disk->private_data, ==, NULL);

	rw_destroy(&zv->zv_suspend_lock);
	zfs_rangelock_fini(&zv->zv_rangelock);

	del_gendisk(zv->zv_zso->zvo_disk);
#if defined(HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS) && \
    (defined(HAVE_BLK_ALLOC_DISK) || defined(HAVE_BLK_ALLOC_DISK_2ARG))
#if defined(HAVE_BLK_CLEANUP_DISK)
	blk_cleanup_disk(zv->zv_zso->zvo_disk);
#else
	put_disk(zv->zv_zso->zvo_disk);
#endif
#else
	blk_cleanup_queue(zv->zv_zso->zvo_queue);
	put_disk(zv->zv_zso->zvo_disk);
#endif

	if (zv->zv_zso->use_blk_mq)
		blk_mq_free_tag_set(&zv->zv_zso->tag_set);

	ida_simple_remove(&zvol_ida,
	    MINOR(zv->zv_zso->zvo_dev) >> ZVOL_MINOR_BITS);

	mutex_destroy(&zv->zv_state_lock);
	dataset_kstats_destroy(&zv->zv_kstat);

	kmem_free(zv->zv_zso, sizeof (struct zvol_state_os));
	kmem_free(zv, sizeof (zvol_state_t));
}
void
zvol_wait_close(zvol_state_t *zv)
{
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume. Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_os_create_minor(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;
	int idx;
	uint64_t hash = zvol_name_hash(name);
	uint64_t volthreading;
	bool replayed_zil = B_FALSE;

	if (zvol_inhibit_dev)
		return (0);

	idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
	if (idx < 0)
		return (SET_ERROR(-idx));
	minor = idx << ZVOL_MINOR_BITS;
	if (MINOR(minor) != minor) {
		/* too many partitions can cause an overflow */
		zfs_dbgmsg("zvol: create minor overflow: %s, minor %u/%u",
		    name, minor, MINOR(minor));
		ida_simple_remove(&zvol_ida, idx);
		return (SET_ERROR(EINVAL));
	}

	zv = zvol_find_by_name_hash(name, hash, RW_NONE);
	if (zv) {
		ASSERT(MUTEX_HELD(&zv->zv_state_lock));
		mutex_exit(&zv->zv_state_lock);
		ida_simple_remove(&zvol_ida, idx);
		return (SET_ERROR(EEXIST));
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, B_TRUE, FTAG, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;
	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}
	zv->zv_hash = hash;

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	/* Default */
	zv->zv_threading = B_TRUE;
	if (dsl_prop_get_integer(name, "volthreading", &volthreading, NULL)
	    == 0)
		zv->zv_threading = volthreading;

	set_capacity(zv->zv_zso->zvo_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_zso->zvo_queue,
	    (DMU_MAX_ACCESS / 4) >> 9);

	if (zv->zv_zso->use_blk_mq) {
		/*
		 * IO requests can be really big (1MB). When an IO request
		 * comes in, it is passed off to zvol_read() or zvol_write()
		 * in a new thread, where it is chunked up into 'volblocksize'
		 * sized pieces and processed. So for example, if the request
		 * is a 1MB write and your volblocksize is 128k, one zvol_write
		 * thread will take that request and sequentially do eight 128k
		 * IOs. This is due to the fact that the thread needs to lock
		 * each volblocksize sized block. So you might be wondering:
		 * "instead of passing the whole 1MB request to one thread,
		 * why not pass eight individual 128k chunks to eight threads
		 * and process the whole write in parallel?" The short answer
		 * is that there's a sweet spot number of chunks that balances
		 * the greater parallelism with the added overhead of more
		 * threads. The sweet spot can be different depending on if you
		 * have a read or write heavy workload. Writes typically want
		 * high chunk counts while reads typically want lower ones. On
		 * a test pool with 6 NVMe drives in a 3x 2-disk mirror
		 * configuration, with volblocksize=8k, the sweet spot for good
		 * sequential reads and writes was at 8 chunks.
		 */

		/*
		 * Below we tell the kernel how big we want our requests
		 * to be. You would think that blk_queue_io_opt() would be
		 * used to do this since it is used to "set optimal request
		 * size for the queue", but that doesn't seem to do
		 * anything - the kernel still gives you huge requests
		 * with tons of little PAGE_SIZE segments contained within it.
		 *
		 * Knowing that the kernel will just give you PAGE_SIZE segments
		 * no matter what, you can say "ok, I want PAGE_SIZE byte
		 * segments, and I want 'N' of them per request", where N is
		 * the correct number of segments for the volblocksize and
		 * number of chunks you want.
		 */
		if (zvol_blk_mq_blocks_per_thread != 0) {
			unsigned int chunks;
			chunks = MIN(zvol_blk_mq_blocks_per_thread, UINT16_MAX);

			blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
			    PAGE_SIZE);
			blk_queue_max_segments(zv->zv_zso->zvo_queue,
			    (zv->zv_volblocksize * chunks) / PAGE_SIZE);
		} else {
			/*
			 * Special case: zvol_blk_mq_blocks_per_thread = 0
			 * Max everything out.
			 */
			blk_queue_max_segments(zv->zv_zso->zvo_queue,
			    UINT16_MAX);
			blk_queue_max_segment_size(zv->zv_zso->zvo_queue,
			    UINT_MAX);
		}
	} else {
		blk_queue_max_segments(zv->zv_zso->zvo_queue, UINT16_MAX);
		blk_queue_max_segment_size(zv->zv_zso->zvo_queue, UINT_MAX);
	}
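
	/*
	 * Worked example (illustrative): with volblocksize=8k, PAGE_SIZE=4k
	 * and the default zvol_blk_mq_blocks_per_thread=8, the queue
	 * advertises (8k * 8) / 4k = 16 PAGE_SIZE segments, i.e. 64k
	 * requests, so a 1MB write arrives as sixteen 64k requests spread
	 * across the hardware queues.
	 */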
	blk_queue_physical_block_size(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_zso->zvo_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_zso->zvo_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_zso->zvo_queue,
	    zv->zv_volblocksize);
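
	/*
	 * Worked example (illustrative): with the default
	 * zvol_max_discard_blocks=16384 and volblocksize=128k, the largest
	 * single discard accepted is 16384 * 128k = 2 GiB, and discards are
	 * aligned to the 128k volblocksize so dnode_free_range() never has
	 * to zero partial blocks.
	 */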
#ifdef QUEUE_FLAG_DISCARD
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, zv->zv_zso->zvo_queue);
#endif

#ifdef QUEUE_FLAG_NONROT
	blk_queue_flag_set(QUEUE_FLAG_NONROT, zv->zv_zso->zvo_queue);
#endif

#ifdef QUEUE_FLAG_ADD_RANDOM
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, zv->zv_zso->zvo_queue);
#endif

	/* This flag was introduced in kernel version 4.12. */
#ifdef QUEUE_FLAG_SCSI_PASSTHROUGH
	blk_queue_flag_set(QUEUE_FLAG_SCSI_PASSTHROUGH, zv->zv_zso->zvo_queue);
#endif

	ASSERT3P(zv->zv_kstat.dk_kstats, ==, NULL);
	error = dataset_kstats_create(&zv->zv_kstat, zv->zv_objset);
	if (error)
		goto out_dmu_objset_disown;
	ASSERT3P(zv->zv_zilog, ==, NULL);
	zv->zv_zilog = zil_open(os, zvol_get_data, &zv->zv_kstat.dk_zil_sums);
	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			replayed_zil = zil_destroy(zv->zv_zilog, B_FALSE);
		else
			replayed_zil = zil_replay(os, zv, zvol_replay_vector);
	}
	if (replayed_zil)
		zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
	len = MIN(zvol_prefetch_bytes, SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, B_TRUE, FTAG);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));

	/*
	 * Keep in mind that once add_disk() is called, the zvol is
	 * announced to the world, and zvol_open()/zvol_release() can
	 * be called at any time. Incidentally, add_disk() itself calls
	 * zvol_open()->zvol_first_open() and zvol_release()->zvol_last_close()
	 * directly as well.
	 */
	if (error == 0) {
		rw_enter(&zvol_state_lock, RW_WRITER);
		zvol_insert(zv);
		rw_exit(&zvol_state_lock);
#ifdef HAVE_ADD_DISK_RET
		error = add_disk(zv->zv_zso->zvo_disk);
#else
		add_disk(zv->zv_zso->zvo_disk);
#endif
	} else {
		ida_simple_remove(&zvol_ida, idx);
	}

	return (error);
}
void
zvol_os_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_zso->zvo_disk);

	ASSERT(RW_LOCK_HELD(&zvol_state_lock));
	ASSERT(MUTEX_HELD(&zv->zv_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/* move to new hashtable entry */
	zv->zv_hash = zvol_name_hash(zv->zv_name);
	hlist_del(&zv->zv_hlink);
	hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
	 * the name change and fixes the symlinks. This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes. This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_zso->zvo_disk, !readonly);
	set_disk_ro(zv->zv_zso->zvo_disk, readonly);

	dataset_kstats_rename(&zv->zv_kstat, newname);
}
void
zvol_os_set_disk_ro(zvol_state_t *zv, int flags)
{
	set_disk_ro(zv->zv_zso->zvo_disk, flags);
}

void
zvol_os_set_capacity(zvol_state_t *zv, uint64_t capacity)
{
	set_capacity(zv->zv_zso->zvo_disk, capacity);
}
int
zvol_init(void)
{
	int error;

	/*
	 * zvol_threads is the module param the user passes in.
	 *
	 * zvol_actual_threads is what we use internally, since the user can
	 * pass zvol_threads = 0 to mean "use all the CPUs" (the default).
	 */
	static unsigned int zvol_actual_threads;

	if (zvol_threads == 0) {
		/*
		 * See dde9380a1 for why 32 was chosen here. This should
		 * probably be refined to be some multiple of the number
		 * of available CPUs.
		 */
		zvol_actual_threads = MAX(num_online_cpus(), 32);
	} else {
		zvol_actual_threads = MIN(MAX(zvol_threads, 1), 1024);
	}

	/*
	 * Use at least 32 zvol_threads but for many-core systems,
	 * prefer 6 threads per taskq, but no more taskqs
	 * than threads in them on large systems.
	 *
	 *                 taskq   total
	 * cpus    taskqs  threads threads
	 * ------- ------- ------- -------
	 * 1       1       32       32
	 * 2       1       32       32
	 * 4       1       32       32
	 * 8       2       16       32
	 * 16      3       11       33
	 * 32      5       7        35
	 * 64      8       8        64
	 * 128     11      12       132
	 * 256     16      16       256
	 */
	zv_taskq_t *ztqs = &zvol_taskqs;
	uint_t num_tqs = MIN(num_online_cpus(), zvol_num_taskqs);
	if (num_tqs == 0) {
		num_tqs = 1 + num_online_cpus() / 6;
		while (num_tqs * num_tqs > zvol_actual_threads)
			num_tqs--;
	}
	uint_t per_tq_thread = zvol_actual_threads / num_tqs;
	if (per_tq_thread * num_tqs < zvol_actual_threads)
		per_tq_thread++;
->tqs_cnt
= num_tqs
;
1654 ztqs
->tqs_taskq
= kmem_alloc(num_tqs
* sizeof (taskq_t
*), KM_SLEEP
);
1655 error
= register_blkdev(zvol_major
, ZVOL_DRIVER
);
1657 kmem_free(ztqs
->tqs_taskq
, ztqs
->tqs_cnt
* sizeof (taskq_t
*));
1658 ztqs
->tqs_taskq
= NULL
;
1659 printk(KERN_INFO
"ZFS: register_blkdev() failed %d\n", error
);
1664 if (zvol_blk_mq_queue_depth
== 0) {
1665 zvol_actual_blk_mq_queue_depth
= BLKDEV_DEFAULT_RQ
;
1667 zvol_actual_blk_mq_queue_depth
=
1668 MAX(zvol_blk_mq_queue_depth
, BLKDEV_MIN_RQ
);
1671 if (zvol_blk_mq_threads
== 0) {
1672 zvol_blk_mq_actual_threads
= num_online_cpus();
1674 zvol_blk_mq_actual_threads
= MIN(MAX(zvol_blk_mq_threads
, 1),
1678 for (uint_t i
= 0; i
< num_tqs
; i
++) {
1680 (void) snprintf(name
, sizeof (name
), "%s_tq-%u",
1682 ztqs
->tqs_taskq
[i
] = taskq_create(name
, per_tq_thread
,
1683 maxclsyspri
, per_tq_thread
, INT_MAX
,
1684 TASKQ_PREPOPULATE
| TASKQ_DYNAMIC
);
1685 if (ztqs
->tqs_taskq
[i
] == NULL
) {
1686 for (int j
= i
- 1; j
>= 0; j
--)
1687 taskq_destroy(ztqs
->tqs_taskq
[j
]);
1688 unregister_blkdev(zvol_major
, ZVOL_DRIVER
);
1689 kmem_free(ztqs
->tqs_taskq
, ztqs
->tqs_cnt
*
1690 sizeof (taskq_t
*));
1691 ztqs
->tqs_taskq
= NULL
;
1697 ida_init(&zvol_ida
);
void
zvol_fini(void)
{
	zv_taskq_t *ztqs = &zvol_taskqs;

	unregister_blkdev(zvol_major, ZVOL_DRIVER);

	if (ztqs->tqs_taskq == NULL) {
		ASSERT3U(ztqs->tqs_cnt, ==, 0);
	} else {
		for (uint_t i = 0; i < ztqs->tqs_cnt; i++) {
			ASSERT3P(ztqs->tqs_taskq[i], !=, NULL);
			taskq_destroy(ztqs->tqs_taskq[i]);
		}
		kmem_free(ztqs->tqs_taskq, ztqs->tqs_cnt *
		    sizeof (taskq_t *));
		ztqs->tqs_taskq = NULL;
	}

	ida_destroy(&zvol_ida);
}
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads to handle I/O requests. Set "
	"to 0 to use all active CPUs");

module_param(zvol_request_sync, uint, 0644);
MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");

module_param(zvol_num_taskqs, uint, 0444);
MODULE_PARM_DESC(zvol_num_taskqs, "Number of zvol taskqs");

module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");

module_param(zvol_volmode, uint, 0644);
MODULE_PARM_DESC(zvol_volmode, "Default volmode property value");

module_param(zvol_blk_mq_queue_depth, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_queue_depth, "Default blk-mq queue depth");

module_param(zvol_use_blk_mq, uint, 0644);
MODULE_PARM_DESC(zvol_use_blk_mq, "Use the blk-mq API for zvols");

module_param(zvol_blk_mq_blocks_per_thread, uint, 0644);
MODULE_PARM_DESC(zvol_blk_mq_blocks_per_thread,
	"Process volblocksize blocks per thread");

#ifndef HAVE_BLKDEV_GET_ERESTARTSYS
module_param(zvol_open_timeout_ms, uint, 0644);
MODULE_PARM_DESC(zvol_open_timeout_ms, "Timeout for ZVOL open retries");
#endif