/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
 */

/*
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *	/dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load.  No user command
 * needs to be run before opening and using a device.
 */
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <linux/blkdev_compat.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_prefetch_bytes = (128 * 1024);
unsigned long zvol_max_discard_blocks = 16384;
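/*
 * These four tunables are exposed to userspace as module parameters; see
 * the module_param()/MODULE_PARM_DESC() declarations at the end of this
 * file.  On a typical ZFS on Linux build (where the zvol code is part of
 * the zfs module) they appear under /sys/module/zfs/parameters/.
 */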
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
void *zvol_tag = "zvol_tag";
/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char zv_name[MAXNAMELEN];	/* name */
	uint64_t zv_volsize;		/* advertised space */
	uint64_t zv_volblocksize;	/* volume block size */
	objset_t *zv_objset;		/* objset handle */
	uint32_t zv_flags;		/* ZVOL_* flags */
	uint32_t zv_open_count;		/* open counts */
	uint32_t zv_changed;		/* disk changed */
	zilog_t *zv_zilog;		/* ZIL handle */
	zfs_rlock_t zv_range_lock;	/* range lock */
	dmu_buf_t *zv_dbuf;		/* bonus handle */
	dev_t zv_dev;			/* device id */
	struct gendisk *zv_disk;	/* generic disk */
	struct request_queue *zv_queue;	/* request queue */
	list_node_t zv_next;		/* next zvol_state_t linkage */
} zvol_state_t;
typedef enum {
	ZVOL_ASYNC_CREATE_MINORS,
	ZVOL_ASYNC_REMOVE_MINORS,
	ZVOL_ASYNC_RENAME_MINORS,
	ZVOL_ASYNC_SET_SNAPDEV,
	ZVOL_ASYNC_MAX
} zvol_async_op_t;
typedef struct {
	zvol_async_op_t op;
	char pool[MAXNAMELEN];
	char name1[MAXNAMELEN];
	char name2[MAXNAMELEN];
	zprop_source_t source;
	uint64_t snapdev;
} zvol_task_t;
#define	ZVOL_RDONLY	0x1
/*
 * Find the next available range of ZVOL_MINORS minor numbers.  The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence.  This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return (SET_ERROR(ENXIO));

	return (0);
}
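/*
 * Note: each volume claims a block of ZVOL_MINORS consecutive minor
 * numbers (see the alloc_disk(ZVOL_MINORS) call in zvol_alloc() below),
 * so the kernel can assign device nodes to partitions created on the
 * volume: minor N is the whole disk, minors N+1..N+ZVOL_MINORS-1 are
 * its partitions.
 */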
/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (zv->zv_dev == dev)
			return (zv);
	}

	return (NULL);
}
/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (strncmp(zv->zv_name, name, MAXNAMELEN) == 0)
			return (zv);
	}

	return (NULL);
}
/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
	struct block_device *bdev;
	unsigned int major;

	bdev = vdev_lookup_bdev(device);
	if (IS_ERR(bdev))
		return (B_FALSE);

	major = MAJOR(bdev->bd_dev);
	bdput(bdev);

	if (major == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}
/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}
/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t *doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (SET_ERROR(error));

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
	error = dmu_object_info(os, ZVOL_OBJ, doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi->doi_data_block_size);
	}

	kmem_free(doi, sizeof (dmu_object_info_t));

	return (SET_ERROR(error));
}
static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
	struct block_device *bdev;

	bdev = bdget_disk(zv->zv_disk, 0);
	if (bdev == NULL)
		return;

	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);

	bdput(bdev);
}
/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > SPEC_MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}
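/*
 * Example (illustrative): a 10 GiB volume with an 8 KiB block size
 * passes these checks because the size is non-zero and an exact
 * multiple of the block size (10737418240 % 8192 == 0).
 */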
/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
	dmu_tx_t *tx;
	uint64_t txg;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (SET_ERROR(error));
	}
	txg = dmu_tx_get_txg(tx);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	txg_wait_synced(dmu_objset_pool(os), txg);

	if (error == 0)
		error = dmu_free_long_range(os,
		    ZVOL_OBJ, volsize, DMU_OBJECT_END);

	return (error);
}
static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
	zvol_size_changed(zv, volsize);

	/*
	 * We should post an event here describing the expansion.  However,
	 * the zfs_ereport_post() interface doesn't nicely support posting
	 * events for zvols, it assumes events relate to vdevs or zios.
	 */

	return (0);
}
/*
 * Set ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv = NULL;
	objset_t *os = NULL;
	int error;
	dmu_object_info_t *doi;
	uint64_t readonly;
	boolean_t owned = B_FALSE;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (SET_ERROR(error));
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zvol_state_lock);
	zv = zvol_find_by_name(name);

	if (zv == NULL || zv->zv_objset == NULL) {
		if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
		    FTAG, &os)) != 0) {
			mutex_exit(&zvol_state_lock);
			return (SET_ERROR(error));
		}
		owned = B_TRUE;
		if (zv != NULL)
			zv->zv_objset = os;
	} else {
		os = zv->zv_objset;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	/* Free the object info on every path so it cannot leak. */
	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) == 0)
		error = zvol_check_volsize(volsize, doi->doi_data_block_size);
	if (error == 0)
		error = zvol_update_volsize(volsize, os);
	kmem_free(doi, sizeof (dmu_object_info_t));

	if (error == 0 && zv != NULL)
		error = zvol_update_live_volsize(zv, volsize);

	if (owned) {
		dmu_objset_disown(os, FTAG);
		if (zv != NULL)
			zv->zv_objset = NULL;
	}
	mutex_exit(&zvol_state_lock);
	return (error);
}
/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
	/* Record sizes above 128k need the feature to be enabled */
	if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
		spa_t *spa;
		int error;

		if ((error = spa_open(name, &spa, FTAG)) != 0)
			return (error);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
			spa_close(spa, FTAG);
			return (SET_ERROR(ENOTSUP));
		}

		/*
		 * We don't allow setting the property above 1MB,
		 * unless the tunable has been changed.
		 */
		if (volblocksize > zfs_max_recordsize) {
			spa_close(spa, FTAG);
			return (SET_ERROR(EDOM));
		}

		spa_close(spa, FTAG);
	}

	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
/*
 * Set ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (zv->zv_flags & ZVOL_RDONLY) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = SET_ERROR(EBUSY);
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}
out:
	mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}
/*
 * Replay a TX_TRUNCATE ZIL transaction if asked.  TX_TRUNCATE is how we
 * implement DKIOCFREE/free-long-range.
 */
static int
zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
{
	uint64_t offset, length;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	offset = lr->lr_offset;
	length = lr->lr_length;

	return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
}
/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (SET_ERROR(error));
}
static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}
/*
 * Callback vectors for replaying records.
 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
 */
zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
	(zil_replay_func_t)zvol_replay_err,	/* no such transaction type */
	(zil_replay_func_t)zvol_replay_err,	/* TX_CREATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKXATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SYMLINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_REMOVE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RMDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_LINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RENAME */
	(zil_replay_func_t)zvol_replay_write,	/* TX_WRITE */
	(zil_replay_func_t)zvol_replay_truncate, /* TX_TRUNCATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SETATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_ACL */
};
/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;
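/*
 * Summary of the itx write states chosen below (see the ZIL
 * implementation headers for the authoritative definitions):
 *
 *   WR_INDIRECT  - block-aligned, block-sized writes with no separate
 *                  log device; the record carries a block pointer that
 *                  dmu_sync() fills in later instead of the data itself.
 *   WR_COPIED    - small synchronous writes; the data is copied into
 *                  the log record immediately.
 *   WR_NEED_COPY - everything else; the data is copied only if and when
 *                  the itx is actually committed.
 */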
static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
    uint64_t size, int sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;
	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (size) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    size >= blocksize && offset % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = offset;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		(void) zil_itx_assign(zilog, itx, tx);

		offset += len;
		size -= len;
	}
}
static int
zvol_write(zvol_state_t *zv, uio_t *uio, boolean_t sync)
{
	uint64_t volsize = zv->zv_volsize;
	rl_t *rl;
	int error = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
	    uio->uio_resid, RL_WRITER);

	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
		uint64_t off = uio->uio_loffset;
		dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);

		if (bytes > volsize - off)	/* don't write past the end */
			bytes = volsize - off;

		dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);

		/* This will only fail for ENOSPC */
		error = dmu_tx_assign(tx, TXG_WAIT);
		if (error) {
			dmu_tx_abort(tx);
			break;
		}
		error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
		if (error == 0)
			zvol_log_write(zv, tx, off, bytes, sync);
		dmu_tx_commit(tx);

		if (error)
			break;
	}

	zfs_range_unlock(rl);

	if (sync)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	return (error);
}
/*
 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
 */
static void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
    boolean_t sync)
{
	itx_t *itx;
	lr_truncate_t *lr;
	zilog_t *zilog = zv->zv_zilog;

	if (zil_replaying(zilog, tx))
		return;

	itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
	lr = (lr_truncate_t *)&itx->itx_lr;
	lr->lr_foid = ZVOL_OBJ;
	lr->lr_offset = off;
	lr->lr_length = len;

	itx->itx_sync = sync;
	zil_itx_assign(zilog, itx, tx);
}
static int
zvol_discard(struct bio *bio)
{
	zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
	uint64_t start = BIO_BI_SECTOR(bio) << 9;
	uint64_t size = BIO_BI_SIZE(bio);
	uint64_t end = start + size;
	int error;
	rl_t *rl;
	dmu_tx_t *tx;

	ASSERT(zv && zv->zv_open_count > 0);

	if (end > zv->zv_volsize)
		return (SET_ERROR(EIO));
	/*
	 * Align the request to volume block boundaries when a secure erase is
	 * not required.  This will prevent dnode_free_range() from zeroing out
	 * the unaligned parts which is slow (read-modify-write) and useless
	 * since we are not freeing any space by doing so.
	 */
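	/*
	 * Worked example (illustrative): with an 8 KiB volblocksize, a
	 * discard of bytes [4096, 20480) is trimmed to [8192, 16384);
	 * only whole blocks are freed and the partial blocks at either
	 * edge are left intact.
	 */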
	if (!bio_is_secure_erase(bio)) {
		start = P2ROUNDUP(start, zv->zv_volblocksize);
		end = P2ALIGN(end, zv->zv_volblocksize);
		size = end - start;
	}

	if (start >= end)
		return (0);

	rl = zfs_range_lock(&zv->zv_range_lock, start, size, RL_WRITER);
	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_mark_netfree(tx);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error != 0) {
		dmu_tx_abort(tx);
	} else {
		zvol_log_truncate(zv, tx, start, size, B_TRUE);
		dmu_tx_commit(tx);
		error = dmu_free_long_range(zv->zv_objset,
		    ZVOL_OBJ, start, size);
	}

	zfs_range_unlock(rl);

	return (error);
}
static int
zvol_read(zvol_state_t *zv, uio_t *uio)
{
	uint64_t volsize = zv->zv_volsize;
	rl_t *rl;
	int error = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	rl = zfs_range_lock(&zv->zv_range_lock, uio->uio_loffset,
	    uio->uio_resid, RL_READER);

	while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
		uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);

		/* don't read past the end */
		if (bytes > volsize - uio->uio_loffset)
			bytes = volsize - uio->uio_loffset;

		error = dmu_read_uio_dbuf(zv->zv_dbuf, uio, bytes);
		if (error) {
			/* convert checksum errors into IO errors */
			if (error == ECKSUM)
				error = SET_ERROR(EIO);
			break;
		}
	}

	zfs_range_unlock(rl);

	return (error);
}
static MAKE_REQUEST_FN_RET
zvol_request(struct request_queue *q, struct bio *bio)
{
	uio_t uio;
	zvol_state_t *zv = q->queuedata;
	fstrans_cookie_t cookie = spl_fstrans_mark();
	int rw = bio_data_dir(bio);
#ifdef HAVE_GENERIC_IO_ACCT
	unsigned long start = jiffies;
#endif
	int error = 0;

	uio.uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
	uio.uio_skip = BIO_BI_SKIP(bio);
	uio.uio_resid = BIO_BI_SIZE(bio);
	uio.uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
	uio.uio_loffset = BIO_BI_SECTOR(bio) << 9;
	uio.uio_limit = MAXOFFSET_T;
	uio.uio_segflg = UIO_BVEC;

	if (bio_has_data(bio) && uio.uio_loffset + uio.uio_resid >
	    zv->zv_volsize) {
		printk(KERN_INFO
		    "%s: bad access: offset=%llu, size=%lu\n",
		    zv->zv_disk->disk_name,
		    (long long unsigned)uio.uio_loffset,
		    (long unsigned)uio.uio_resid);
		error = SET_ERROR(EIO);
		goto out1;
	}

#ifdef HAVE_GENERIC_IO_ACCT
	generic_start_io_acct(rw, bio_sectors(bio), &zv->zv_disk->part0);
#endif

	if (rw == WRITE) {
		if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
			error = SET_ERROR(EROFS);
			goto out2;
		}

		if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
			error = zvol_discard(bio);
			goto out2;
		}

		/*
		 * Some requests are just for flush and nothing else.
		 */
		if (uio.uio_resid == 0) {
			if (bio_is_flush(bio))
				zil_commit(zv->zv_zilog, ZVOL_OBJ);
			goto out2;
		}

		error = zvol_write(zv, &uio,
		    bio_is_flush(bio) || bio_is_fua(bio) ||
		    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
	} else {
		error = zvol_read(zv, &uio);
	}

out2:
#ifdef HAVE_GENERIC_IO_ACCT
	generic_end_io_acct(rw, &zv->zv_disk->part0, start);
#endif
out1:
	BIO_END_IO(bio, -error);
	spl_fstrans_unmark(cookie);
#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
	return (0);
#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
	return (BLK_QC_T_NONE);
#endif
}
static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}
/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_range_lock, offset, size,
	    RL_READER);
	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
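	/*
	 * In the indirect case below, dmu_sync() is handed zvol_get_done()
	 * as its callback: once the block is written, lr_blkptr has been
	 * filled in and the log record can reference the on-disk data
	 * instead of embedding a copy of it.
	 */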
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN_TYPED(offset, size, uint64_t);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = &lr->lr_blkptr;

			ASSERT(db != NULL);
			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (SET_ERROR(error));
}
/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
			break;
	}

	list_insert_before(&zvol_state_list, zv, zv_insert);
}
/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	list_remove(&zvol_state_list, zv_remove);
}
static int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int error;
	uint64_t ro;

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
	if (error)
		return (SET_ERROR(-error));

	zv->zv_objset = os;

	error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
	if (error)
		goto out_owned;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_owned;

	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error)
		goto out_owned;

	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);

	if (ro || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os))) {
		set_disk_ro(zv->zv_disk, 1);
		zv->zv_flags |= ZVOL_RDONLY;
	} else {
		set_disk_ro(zv->zv_disk, 0);
		zv->zv_flags &= ~ZVOL_RDONLY;
	}

out_owned:
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		zv->zv_objset = NULL;
	}

	return (SET_ERROR(-error));
}
static void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}
static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv;
	int error = 0, drop_mutex = 0;

	/*
	 * If the caller is already holding the mutex do not take it
	 * again, this will happen as part of zvol_create_minor_impl().
	 * Once add_disk() is called the device is live and the kernel
	 * will attempt to open it to read the partition information.
	 */
	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	/*
	 * Obtain a copy of private_data under the lock to make sure
	 * that either the result of zvol_free() setting
	 * bdev->bd_disk->private_data to NULL is observed, or zvol_free()
	 * is not called on this zv because of the positive zv_open_count.
	 */
	zv = bdev->bd_disk->private_data;
	if (zv == NULL) {
		error = -ENXIO;
		goto out_mutex;
	}

	if (zv->zv_open_count == 0) {
		error = zvol_first_open(zv);
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

	check_disk_change(bdev);

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

	return (SET_ERROR(error));
}
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
#else
static int
#endif
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv = disk->private_data;
	int drop_mutex = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	zv->zv_open_count--;
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
	return (0);
#endif
}

static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	switch (cmd) {
	case BLKFLSBUF:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case BLKZNAME:
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		break;
	default:
		error = -ENOTTY;
		break;
	}

	return (SET_ERROR(error));
}
#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl	NULL
#endif
static int
zvol_media_changed(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	ASSERT(zv && zv->zv_open_count > 0);

	return (zv->zv_changed);
}
static int
zvol_revalidate_disk(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	ASSERT(zv && zv->zv_open_count > 0);

	zv->zv_changed = 0;
	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	return (0);
}
/*
 * Provide a simple virtual geometry for legacy compatibility.  For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices.  For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
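/*
 * Worked example (illustrative, using the 16 head / 63 sector geometry
 * applied to large devices below): a 1 GiB volume is 2097152 512-byte
 * sectors, so cylinders = 2097152 / (16 * 63) = 2080.
 */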
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors;

	ASSERT(zv && zv->zv_open_count > 0);

	sectors = get_capacity(zv->zv_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return (0);
}
static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
	zvol_state_t *zv;
	struct kobject *kobj;

	mutex_enter(&zvol_state_lock);
	zv = zvol_find_by_dev(dev);
	kobj = zv ? get_disk(zv->zv_disk) : NULL;
	mutex_exit(&zvol_state_lock);

	return (kobj);
}
#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};
#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
	return (zvol_open(inode->i_bdev, file->f_mode));
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
	return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL || inode == NULL)
		return (SET_ERROR(-EINVAL));

	return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
}

#ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL)
		return (SET_ERROR(-EINVAL));

	return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
	    file->f_mode, cmd, arg));
}
#else
#define	zvol_compat_ioctl_by_inode	NULL
#endif

static struct block_device_operations zvol_ops = {
	.open			= zvol_open_by_inode,
	.release		= zvol_release_by_inode,
	.ioctl			= zvol_ioctl_by_inode,
	.compat_ioctl		= zvol_compat_ioctl_by_inode,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
	if (zv->zv_queue == NULL)
		goto out_kmem;

	blk_queue_make_request(zv->zv_queue, zvol_request);
	blk_queue_set_write_cache(zv->zv_queue, B_TRUE, B_TRUE);

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	zfs_rlock_init(&zv->zv_range_lock);

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return (zv);

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return (NULL);
}
/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	ASSERT(zv->zv_open_count == 0);

	zfs_rlock_destroy(&zv->zv_range_lock);

	zv->zv_disk->private_data = NULL;

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}
/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume.  Once this function returns the block
 * device is live and ready for use.
 */
static int
zvol_create_minor_impl(const char *name)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	uint64_t len;
	unsigned minor = 0;
	int error = 0;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv) {
		error = SET_ERROR(EEXIST);
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#ifdef QUEUE_FLAG_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif
#ifdef QUEUE_FLAG_ADD_RANDOM
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue);
#endif

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}
	/*
	 * When udev detects the addition of the device it will immediately
	 * invoke blkid(8) to determine the type of content on the device.
	 * Prefetching the blocks commonly scanned by blkid(8) will speed
	 * up this process.
	 */
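	/*
	 * Both ends of the device are prefetched below because several of
	 * the signatures blkid(8) probes for (for example the backup GPT
	 * header) are stored in the last blocks of a device rather than
	 * the first.
	 */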
	len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
	if (len > 0) {
		dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
		dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
		    ZIO_PRIORITY_SYNC_READ);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (error == 0) {
		zvol_insert(zv);
		/*
		 * Drop the lock to prevent deadlock with sys_open() ->
		 * zvol_open(), which first takes bd_disk->bd_mutex and then
		 * takes zvol_state_lock, whereas this code path first takes
		 * zvol_state_lock, and then takes bd_disk->bd_mutex.
		 */
		mutex_exit(&zvol_state_lock);
		add_disk(zv->zv_disk);
	} else {
		mutex_exit(&zvol_state_lock);
	}

	return (SET_ERROR(error));
}
/*
 * Rename a block device minor node for the specified volume.
 */
static void
zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_disk);

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued.  This ensures udev detects
	 * the name change and fixes the symlinks.  This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes.  This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_disk, !readonly);
	set_disk_ro(zv->zv_disk, readonly);
}
/*
 * Mask errors to continue dmu_objset_find() traversal
 */
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
	const char *name = (const char *)arg;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	/* skip the designated dataset */
	if (name && strcmp(dsname, name) == 0)
		return (0);

	/* at this point, the dsname should name a snapshot */
	if (strchr(dsname, '@') == NULL) {
		dprintf("zvol_create_snap_minor_cb(): "
		    "%s is not a snapshot name\n", dsname);
	} else {
		(void) zvol_create_minor_impl(dsname);
	}

	return (0);
}
/*
 * Mask errors to continue dmu_objset_find() traversal
 */
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
	uint64_t snapdev;
	int error;

	ASSERT0(MUTEX_HELD(&spa_namespace_lock));

	error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
	if (error)
		return (0);

	/*
	 * Given the name and the 'snapdev' property, create device minor nodes
	 * with the linkages to zvols/snapshots as needed.
	 * If the name represents a zvol, create a minor node for the zvol, then
	 * check if its snapshots are 'visible', and if so, iterate over the
	 * snapshots and create device minor nodes for those.
	 */
	if (strchr(dsname, '@') == NULL) {
		/* create minor for the 'dsname' explicitly */
		error = zvol_create_minor_impl(dsname);
		if ((error == 0 || error == EEXIST) &&
		    (snapdev == ZFS_SNAPDEV_VISIBLE)) {
			fstrans_cookie_t cookie = spl_fstrans_mark();
			/*
			 * traverse snapshots only, do not traverse children,
			 * and skip the 'dsname'
			 */
			error = dmu_objset_find((char *)dsname,
			    zvol_create_snap_minor_cb, (void *)dsname,
			    DS_FIND_SNAPSHOTS);
			spl_fstrans_unmark(cookie);
		}
	} else {
		dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
		    dsname);
	}

	return (0);
}
/*
 * Create minors for the specified dataset, including children and snapshots.
 * Pay attention to the 'snapdev' property and iterate over the snapshots
 * only if they are 'visible'.  This approach allows one to assure that the
 * snapshot metadata is read from disk only if it is needed.
 *
 * The name can represent a dataset to be recursively scanned for zvols and
 * their snapshots, or a single zvol snapshot.  If the name represents a
 * dataset, the scan is performed in two nested stages:
 * - scan the dataset for zvols, and
 * - for each zvol, create a minor node, then check if the zvol's snapshots
 *   are 'visible', and only then iterate over the snapshots if needed
 *
 * If the name represents a snapshot, a check is performed if the snapshot is
 * 'visible' (which also verifies that the parent is a zvol), and if so,
 * a minor node for that snapshot is created.
 */
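/*
 * Example (illustrative): given a zvol "tank/vol" with a snapshot
 * "tank/vol@snap", running
 *
 *	# zfs set snapdev=visible tank/vol
 *
 * creates a minor for the snapshot (udev then adds the
 * /dev/zvol/tank/vol@snap link), while "snapdev=hidden" removes it.
 */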
static int
zvol_create_minors_impl(const char *name)
{
	int error = 0;
	fstrans_cookie_t cookie;
	char *atp, *parent;

	if (zvol_inhibit_dev)
		return (0);

	parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) strlcpy(parent, name, MAXPATHLEN);

	if ((atp = strrchr(parent, '@')) != NULL) {
		uint64_t snapdev;

		*atp = '\0';
		error = dsl_prop_get_integer(parent, "snapdev",
		    &snapdev, NULL);

		if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
			error = zvol_create_minor_impl(name);
	} else {
		cookie = spl_fstrans_mark();
		error = dmu_objset_find(parent, zvol_create_minors_cb,
		    NULL, DS_FIND_CHILDREN);
		spl_fstrans_unmark(cookie);
	}

	kmem_free(parent, MAXPATHLEN);

	return (SET_ERROR(error));
}
/*
 * Remove minors for specified dataset including children and snapshots.
 */
static void
zvol_remove_minors_impl(const char *name)
{
	zvol_state_t *zv, *zv_next;
	int namelen = ((name) ? strlen(name) : 0);

	if (zvol_inhibit_dev)
		return;

	mutex_enter(&zvol_state_lock);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    (zv->zv_name[namelen] == '/' ||
		    zv->zv_name[namelen] == '@'))) {

			/* If in use, leave alone */
			if (zv->zv_open_count > 0)
				continue;

			zvol_remove(zv);
			zvol_free(zv);
		}
	}

	mutex_exit(&zvol_state_lock);
}
/* Remove minor for this specific snapshot only */
static void
zvol_remove_minor_impl(const char *name)
{
	zvol_state_t *zv, *zv_next;

	if (zvol_inhibit_dev)
		return;

	if (strchr(name, '@') == NULL)
		return;

	mutex_enter(&zvol_state_lock);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (strcmp(zv->zv_name, name) == 0) {
			/* If in use, leave alone */
			if (zv->zv_open_count > 0)
				continue;
			zvol_remove(zv);
			zvol_free(zv);
			break;
		}
	}

	mutex_exit(&zvol_state_lock);
}
/*
 * Rename minors for specified dataset including children and snapshots.
 */
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
	zvol_state_t *zv, *zv_next;
	int oldnamelen, newnamelen;
	char *name;

	if (zvol_inhibit_dev)
		return;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);
	name = kmem_alloc(MAXNAMELEN, KM_SLEEP);

	mutex_enter(&zvol_state_lock);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		/* If in use, leave alone */
		if (zv->zv_open_count > 0)
			continue;

		if (strcmp(zv->zv_name, oldname) == 0) {
			zvol_rename_minor(zv, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			snprintf(name, MAXNAMELEN, "%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			zvol_rename_minor(zv, name);
		}
	}

	mutex_exit(&zvol_state_lock);

	kmem_free(name, MAXNAMELEN);
}
typedef struct zvol_snapdev_cb_arg {
	uint64_t snapdev;
} zvol_snapdev_cb_arg_t;

static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
	zvol_snapdev_cb_arg_t *arg = param;

	if (strchr(dsname, '@') == NULL)
		return (0);

	switch (arg->snapdev) {
	case ZFS_SNAPDEV_VISIBLE:
		(void) zvol_create_minor_impl(dsname);
		break;
	case ZFS_SNAPDEV_HIDDEN:
		(void) zvol_remove_minor_impl(dsname);
		break;
	}

	return (0);
}

static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
	zvol_snapdev_cb_arg_t arg = {snapdev};
	fstrans_cookie_t cookie = spl_fstrans_mark();

	/*
	 * The zvol_set_snapdev_sync() sets snapdev appropriately
	 * in the dataset hierarchy.  Here, we only scan snapshots.
	 */
	dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
	spl_fstrans_unmark(cookie);
}
static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
    uint64_t snapdev)
{
	zvol_task_t *task;
	char *delim;

	/* Never allow tasks on hidden names. */
	if (name1[0] == '$')
		return (NULL);

	task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
	task->op = op;
	task->snapdev = snapdev;
	delim = strchr(name1, '/');
	strlcpy(task->pool, name1, delim ? (delim - name1 + 1) : MAXNAMELEN);

	strlcpy(task->name1, name1, MAXNAMELEN);
	if (name2 != NULL)
		strlcpy(task->name2, name2, MAXNAMELEN);

	return (task);
}

static void
zvol_task_free(zvol_task_t *task)
{
	kmem_free(task, sizeof (zvol_task_t));
}
/*
 * The worker thread function performed asynchronously.
 */
static void
zvol_task_cb(void *param)
{
	zvol_task_t *task = (zvol_task_t *)param;

	switch (task->op) {
	case ZVOL_ASYNC_CREATE_MINORS:
		(void) zvol_create_minors_impl(task->name1);
		break;
	case ZVOL_ASYNC_REMOVE_MINORS:
		zvol_remove_minors_impl(task->name1);
		break;
	case ZVOL_ASYNC_RENAME_MINORS:
		zvol_rename_minors_impl(task->name1, task->name2);
		break;
	case ZVOL_ASYNC_SET_SNAPDEV:
		zvol_set_snapdev_impl(task->name1, task->snapdev);
		break;
	default:
		VERIFY(0);
		break;
	}

	zvol_task_free(task);
}
typedef struct zvol_set_snapdev_arg {
	const char *zsda_name;
	uint64_t zsda_value;
	zprop_source_t zsda_source;
	dmu_tx_t *zsda_tx;
} zvol_set_snapdev_arg_t;
/*
 * Sanity check the dataset for safe use by the sync task.  No additional
 * conditions are imposed.
 */
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
	zvol_set_snapdev_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;
	int error;

	error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
	if (error != 0)
		return (error);

	dsl_dir_rele(dd, FTAG);

	return (error);
}
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
	zvol_set_snapdev_arg_t *zsda = arg;
	char dsname[MAXNAMELEN];
	zvol_task_t *task;

	dsl_dataset_name(ds, dsname);
	dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
	    zsda->zsda_source, sizeof (zsda->zsda_value), 1,
	    &zsda->zsda_value, zsda->zsda_tx);

	task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname,
	    NULL, zsda->zsda_value);
	if (task == NULL)
		return (0);

	(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
	    task, TQ_SLEEP);

	return (0);
}
/*
 * Traverse all child snapshot datasets and apply snapdev appropriately.
 */
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
	zvol_set_snapdev_arg_t *zsda = arg;
	dsl_pool_t *dp = dmu_tx_pool(tx);
	dsl_dir_t *dd;

	VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
	zsda->zsda_tx = tx;

	dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
	    zsda, DS_FIND_CHILDREN);

	dsl_dir_rele(dd, FTAG);
}
int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
	zvol_set_snapdev_arg_t zsda;

	zsda.zsda_name = ddname;
	zsda.zsda_source = source;
	zsda.zsda_value = snapdev;

	return (dsl_sync_task(ddname, zvol_set_snapdev_check,
	    zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
void
zvol_create_minors(spa_t *spa, const char *name, boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_CREATE_MINORS, name, NULL, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != TASKQID_INVALID))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}

void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != TASKQID_INVALID))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}

void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
    boolean_t async)
{
	zvol_task_t *task;
	taskqid_t id;

	task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
	if (task == NULL)
		return;

	id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
	if ((async == B_FALSE) && (id != TASKQID_INVALID))
		taskq_wait_id(spa->spa_zvol_taskq, id);
}
int
zvol_init(void)
{
	int error;

	list_create(&zvol_state_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		goto out;
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	    THIS_MODULE, zvol_probe, NULL, NULL);

	return (0);

out:
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);

	return (SET_ERROR(error));
}
void
zvol_fini(void)
{
	zvol_remove_minors_impl(NULL);

	blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
	unregister_blkdev(zvol_major, ZVOL_DRIVER);

	list_destroy(&zvol_state_list);
	mutex_destroy(&zvol_state_lock);
}
module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");

module_param(zvol_prefetch_bytes, uint, 0644);
MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
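/*
 * Example usage (illustrative; assumes the zvol code is built into the
 * zfs module as in ZFS on Linux):
 *
 *	# modprobe zfs zvol_inhibit_dev=1
 *	# cat /sys/module/zfs/parameters/zvol_prefetch_bytes
 *
 * Parameters declared with mode 0644 may also be changed at runtime
 * through sysfs; those with mode 0444 are read-only after load.
 */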