1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 *
27 * ZFS volume emulation driver.
28 *
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
31 *
32 * /dev/<pool_name>/<dataset_name>
33 *
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
36 *
37 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
38 * Copyright (c) 2016 Actifio, Inc. All rights reserved.
39 */
40
41/*
42 * Note on locking of zvol state structures.
43 *
44 * These structures are used to maintain internal state used to emulate block
45 * devices on top of zvols. In particular, management of device minor number
46 * operations - create, remove, rename, and set_snapdev - involves access to
47 * these structures. The zvol_state_lock is primarily used to protect the
48 * zvol_state_list. The zv->zv_state_lock is used to protect the contents
49 * of the zvol_state_t structures, as well as to make sure that when the
50 * time comes to remove the structure from the list, it is not in use, and
51 * therefore, it can be taken off zvol_state_list and freed.
52 *
53 * The minor operations are issued to the spa->spa_zvol_taskq queues, which are
54 * single-threaded (to preserve order of minor operations), and are executed
55 * through the zvol_task_cb that dispatches the specific operations. Therefore,
56 * these operations are serialized per pool. Consequently, we can be certain
57 * that for a given zvol, there is only one operation at a time in progress.
58 * That is why one can be sure that first, zvol_state_t for a given zvol is
59 * allocated and placed on zvol_state_list, and then other minor operations
60 * for this zvol are going to proceed in the order of issue.
61 *
62 * It is also worth keeping in mind that once add_disk() is called, the zvol is
63 * announced to the world, and zvol_open()/zvol_release() can be called at any
64 * time. Incidentally, add_disk() itself calls zvol_open()->zvol_first_open()
65 * and zvol_release()->zvol_last_close() directly as well.
66 */
67
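/*
 * Illustrative sketch (not part of the driver): the lookup-then-lock
 * pattern described above, as used by zvol_set_volsize() and
 * zvol_suspend(). zvol_state_lock is dropped as soon as the per-zvol
 * zv_state_lock is held, so the list lock is never held across the
 * actual operation:
 *
 *	mutex_enter(&zvol_state_lock);
 *	zv = zvol_find_by_name(name);
 *	if (zv != NULL)
 *		mutex_enter(&zv->zv_state_lock);
 *	mutex_exit(&zvol_state_lock);
 *	...operate on zv...
 *	if (zv != NULL)
 *		mutex_exit(&zv->zv_state_lock);
 */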
68#include <sys/dbuf.h>
69#include <sys/dmu_traverse.h>
70#include <sys/dsl_dataset.h>
71#include <sys/dsl_prop.h>
72#include <sys/dsl_dir.h>
73#include <sys/zap.h>
74#include <sys/zfeature.h>
75#include <sys/zil_impl.h>
76#include <sys/dmu_tx.h>
77#include <sys/zio.h>
78#include <sys/zfs_rlock.h>
79#include <sys/zfs_znode.h>
80#include <sys/spa_impl.h>
81#include <sys/zvol.h>
82#include <linux/blkdev_compat.h>
83
84unsigned int zvol_inhibit_dev = 0;
85unsigned int zvol_major = ZVOL_MAJOR;
86unsigned int zvol_threads = 32;
87unsigned int zvol_request_sync = 0;
88unsigned int zvol_prefetch_bytes = (128 * 1024);
89unsigned long zvol_max_discard_blocks = 16384;
90
91static taskq_t *zvol_taskq;
92static kmutex_t zvol_state_lock;
93static list_t zvol_state_list;
94
95#define ZVOL_HT_SIZE 1024
96static struct hlist_head *zvol_htable;
97#define ZVOL_HT_HEAD(hash) (&zvol_htable[(hash) & (ZVOL_HT_SIZE-1)])
98
99static struct ida zvol_ida;
100
101/*
102 * The in-core state of each volume.
103 */
104struct zvol_state {
105 char zv_name[MAXNAMELEN]; /* name */
106 uint64_t zv_volsize; /* advertised space */
107 uint64_t zv_volblocksize; /* volume block size */
108 objset_t *zv_objset; /* objset handle */
109 uint32_t zv_flags; /* ZVOL_* flags */
110 uint32_t zv_open_count; /* open counts */
111 uint32_t zv_changed; /* disk changed */
112 zilog_t *zv_zilog; /* ZIL handle */
113 zfs_rlock_t zv_range_lock; /* range lock */
114 dmu_buf_t *zv_dbuf; /* bonus handle */
115 dev_t zv_dev; /* device id */
116 struct gendisk *zv_disk; /* generic disk */
117 struct request_queue *zv_queue; /* request queue */
118 list_node_t zv_next; /* next zvol_state_t linkage */
119 uint64_t zv_hash; /* name hash */
120 struct hlist_node zv_hlink; /* hash link */
121 kmutex_t zv_state_lock; /* protects zvol_state_t */
122 atomic_t zv_suspend_ref; /* refcount for suspend */
123 krwlock_t zv_suspend_lock; /* suspend lock */
124};
125
126typedef enum {
127 ZVOL_ASYNC_CREATE_MINORS,
128 ZVOL_ASYNC_REMOVE_MINORS,
129 ZVOL_ASYNC_RENAME_MINORS,
130 ZVOL_ASYNC_SET_SNAPDEV,
131 ZVOL_ASYNC_MAX
132} zvol_async_op_t;
133
134typedef struct {
135 zvol_async_op_t op;
136 char pool[MAXNAMELEN];
137 char name1[MAXNAMELEN];
138 char name2[MAXNAMELEN];
139 zprop_source_t source;
140 uint64_t snapdev;
141} zvol_task_t;
142
143#define ZVOL_RDONLY 0x1
144
145static uint64_t
146zvol_name_hash(const char *name)
147{
148 int i;
149 uint64_t crc = -1ULL;
150 uint8_t *p = (uint8_t *)name;
151 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
152 for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
153 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
154 }
155 return (crc);
156}
157
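/*
 * Illustrative usage sketch for the hash above (not part of the
 * driver): a dataset name is reduced to a 64-bit CRC and
 * ZVOL_HT_HEAD() masks it down to one of the ZVOL_HT_SIZE (1024)
 * chains:
 *
 *	uint64_t hash = zvol_name_hash("pool/vol");
 *	struct hlist_head *head = ZVOL_HT_HEAD(hash);
 */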
158/*
159 * Find a zvol_state_t given the full major+minor dev_t.
160 */
161static zvol_state_t *
162zvol_find_by_dev(dev_t dev)
163{
164 zvol_state_t *zv;
165
166 ASSERT(MUTEX_HELD(&zvol_state_lock));
167 for (zv = list_head(&zvol_state_list); zv != NULL;
168 zv = list_next(&zvol_state_list, zv)) {
169 if (zv->zv_dev == dev)
170 return (zv);
171 }
172
173 return (NULL);
174}
175
176/*
177 * Find a zvol_state_t given the name and hash generated by zvol_name_hash.
178 */
179static zvol_state_t *
180zvol_find_by_name_hash(const char *name, uint64_t hash)
181{
182 zvol_state_t *zv;
183 struct hlist_node *p;
184
185 ASSERT(MUTEX_HELD(&zvol_state_lock));
186 hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
187 zv = hlist_entry(p, zvol_state_t, zv_hlink);
188 if (zv->zv_hash == hash &&
189 strncmp(zv->zv_name, name, MAXNAMELEN) == 0)
190 return (zv);
191 }
192 return (NULL);
193}
194
195/*
196 * Find a zvol_state_t given the name provided at zvol_alloc() time.
197 */
198static zvol_state_t *
199zvol_find_by_name(const char *name)
200{
201 return (zvol_find_by_name_hash(name, zvol_name_hash(name)));
202}
203
204
205/*
206 * Given a path, return TRUE if path is a ZVOL.
207 */
208boolean_t
209zvol_is_zvol(const char *device)
210{
211 struct block_device *bdev;
212 unsigned int major;
213
214 bdev = vdev_lookup_bdev(device);
215 if (IS_ERR(bdev))
216 return (B_FALSE);
217
218 major = MAJOR(bdev->bd_dev);
219 bdput(bdev);
220
221 if (major == zvol_major)
222 return (B_TRUE);
223
224 return (B_FALSE);
225}
226
227/*
228 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
229 */
230void
231zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
232{
233 zfs_creat_t *zct = arg;
234 nvlist_t *nvprops = zct->zct_props;
235 int error;
236 uint64_t volblocksize, volsize;
237
238 VERIFY(nvlist_lookup_uint64(nvprops,
239 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
240 if (nvlist_lookup_uint64(nvprops,
241 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
242 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
243
244 /*
245 * These properties must be removed from the list so the generic
246 * property setting step won't apply to them.
247 */
248 VERIFY(nvlist_remove_all(nvprops,
249 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
250 (void) nvlist_remove_all(nvprops,
251 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
252
253 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
254 DMU_OT_NONE, 0, tx);
255 ASSERT(error == 0);
256
257 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
258 DMU_OT_NONE, 0, tx);
259 ASSERT(error == 0);
260
261 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
262 ASSERT(error == 0);
263}
264
265/*
266 * ZFS_IOC_OBJSET_STATS entry point.
267 */
268int
269zvol_get_stats(objset_t *os, nvlist_t *nv)
270{
271 int error;
272 dmu_object_info_t *doi;
273 uint64_t val;
274
275 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
276 if (error)
277 return (SET_ERROR(error));
278
279 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
280 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
281 error = dmu_object_info(os, ZVOL_OBJ, doi);
282
283 if (error == 0) {
284 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
285 doi->doi_data_block_size);
286 }
287
288 kmem_free(doi, sizeof (dmu_object_info_t));
289
290 return (SET_ERROR(error));
291}
292
293static void
294zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
295{
296 struct block_device *bdev;
297
298 bdev = bdget_disk(zv->zv_disk, 0);
299 if (bdev == NULL)
300 return;
301 set_capacity(zv->zv_disk, volsize >> 9);
302 zv->zv_volsize = volsize;
303 check_disk_size_change(zv->zv_disk, bdev);
304
305 bdput(bdev);
306}
307
308/*
309 * Sanity check volume size.
310 */
311int
312zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
313{
314 if (volsize == 0)
315 return (SET_ERROR(EINVAL));
316
317 if (volsize % blocksize != 0)
318 return (SET_ERROR(EINVAL));
319
320#ifdef _ILP32
321 if (volsize - 1 > SPEC_MAXOFFSET_T)
322 return (SET_ERROR(EOVERFLOW));
323#endif
324 return (0);
325}
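/*
 * Worked example for the checks above (illustrative): a volsize of
 * 1 GiB with an 8 KiB blocksize passes, since 1073741824 % 8192 == 0;
 * a volsize of 1000000 bytes with the same blocksize fails with
 * EINVAL because it is not a multiple of the block size.
 */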
326
327/*
328 * Ensure the zap is flushed then inform the VFS of the capacity change.
329 */
330static int
331zvol_update_volsize(uint64_t volsize, objset_t *os)
332{
333 dmu_tx_t *tx;
334 int error;
335 uint64_t txg;
336
337 tx = dmu_tx_create(os);
338 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
339 dmu_tx_mark_netfree(tx);
340 error = dmu_tx_assign(tx, TXG_WAIT);
341 if (error) {
342 dmu_tx_abort(tx);
343 return (SET_ERROR(error));
344 }
345 txg = dmu_tx_get_txg(tx);
346
347 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
348 &volsize, tx);
349 dmu_tx_commit(tx);
350
351 txg_wait_synced(dmu_objset_pool(os), txg);
352
353 if (error == 0)
354 error = dmu_free_long_range(os,
355 ZVOL_OBJ, volsize, DMU_OBJECT_END);
356
357 return (error);
358}
359
360static int
361zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
362{
363 zvol_size_changed(zv, volsize);
364
365 /*
366 * We should post an event here describing the expansion. However,
367 * the zfs_ereport_post() interface doesn't nicely support posting
368 * events for zvols, it assumes events relate to vdevs or zios.
369 */
370
371 return (0);
372}
373
374/*
375 * Set ZFS_PROP_VOLSIZE set entry point.
376 */
377int
378zvol_set_volsize(const char *name, uint64_t volsize)
379{
380 zvol_state_t *zv = NULL;
381 objset_t *os = NULL;
382 int error;
383 dmu_object_info_t *doi;
384 uint64_t readonly;
385 boolean_t owned = B_FALSE;
386
387 error = dsl_prop_get_integer(name,
388 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
389 if (error != 0)
390 return (SET_ERROR(error));
391 if (readonly)
392 return (SET_ERROR(EROFS));
393
394 mutex_enter(&zvol_state_lock);
395 zv = zvol_find_by_name(name);
396 if (zv != NULL)
397 mutex_enter(&zv->zv_state_lock);
398 mutex_exit(&zvol_state_lock);
399
400 if (zv == NULL || zv->zv_objset == NULL) {
401 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
402 FTAG, &os)) != 0) {
403 if (zv != NULL)
404 mutex_exit(&zv->zv_state_lock);
405 return (SET_ERROR(error));
406 }
407 owned = B_TRUE;
408 if (zv != NULL)
409 zv->zv_objset = os;
410 } else {
411 rw_enter(&zv->zv_suspend_lock, RW_READER);
412 os = zv->zv_objset;
413 }
414
415 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
416
417 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
418 (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
419 goto out;
420
421 error = zvol_update_volsize(volsize, os);
422
423 if (error == 0 && zv != NULL)
424 error = zvol_update_live_volsize(zv, volsize);
425out:
426 kmem_free(doi, sizeof (dmu_object_info_t));
427
428 if (owned) {
429 dmu_objset_disown(os, FTAG);
430 if (zv != NULL)
431 zv->zv_objset = NULL;
432 } else {
433 rw_exit(&zv->zv_suspend_lock);
434 }
435
436 if (zv != NULL)
437 mutex_exit(&zv->zv_state_lock);
438 return (SET_ERROR(error));
439}
440
441/*
442 * Sanity check volume block size.
443 */
444int
445zvol_check_volblocksize(const char *name, uint64_t volblocksize)
446{
447 /* Record sizes above 128k need the feature to be enabled */
448 if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
449 spa_t *spa;
450 int error;
451
452 if ((error = spa_open(name, &spa, FTAG)) != 0)
453 return (error);
454
455 if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
456 spa_close(spa, FTAG);
457 return (SET_ERROR(ENOTSUP));
458 }
459
460 /*
461 * We don't allow setting the property above 1MB,
462 * unless the tunable has been changed.
463 */
464 if (volblocksize > zfs_max_recordsize)
465 return (SET_ERROR(EDOM));
466
467 spa_close(spa, FTAG);
468 }
469
470 if (volblocksize < SPA_MINBLOCKSIZE ||
471 volblocksize > SPA_MAXBLOCKSIZE ||
472 !ISP2(volblocksize))
473 return (SET_ERROR(EDOM));
474
475 return (0);
476}
477
478/*
479 * Set ZFS_PROP_VOLBLOCKSIZE set entry point.
480 */
481int
482zvol_set_volblocksize(const char *name, uint64_t volblocksize)
483{
484 zvol_state_t *zv;
485 dmu_tx_t *tx;
486 int error;
487
488 mutex_enter(&zvol_state_lock);
489
490 zv = zvol_find_by_name(name);
491 if (zv == NULL) {
492 mutex_exit(&zvol_state_lock);
493 return (SET_ERROR(ENXIO));
494 }
495 mutex_enter(&zv->zv_state_lock);
496 mutex_exit(&zvol_state_lock);
497
498 if (zv->zv_flags & ZVOL_RDONLY) {
499 mutex_exit(&zv->zv_state_lock);
500 return (SET_ERROR(EROFS));
501 }
502
503 rw_enter(&zv->zv_suspend_lock, RW_READER);
504
505 tx = dmu_tx_create(zv->zv_objset);
506 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
507 error = dmu_tx_assign(tx, TXG_WAIT);
508 if (error) {
509 dmu_tx_abort(tx);
510 } else {
511 error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
512 volblocksize, 0, tx);
513 if (error == ENOTSUP)
514 error = SET_ERROR(EBUSY);
515 dmu_tx_commit(tx);
516 if (error == 0)
517 zv->zv_volblocksize = volblocksize;
518 }
519 rw_exit(&zv->zv_suspend_lock);
520
521 mutex_exit(&zv->zv_state_lock);
522
523 return (SET_ERROR(error));
524}
525
526/*
527 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
528 * implement DKIOCFREE/free-long-range.
529 */
530static int
531zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
532{
533 uint64_t offset, length;
534
535 if (byteswap)
536 byteswap_uint64_array(lr, sizeof (*lr));
537
538 offset = lr->lr_offset;
539 length = lr->lr_length;
540
541 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
542}
543
544/*
545 * Replay a TX_WRITE ZIL transaction that didn't get committed
546 * after a system failure
547 */
548static int
549zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
550{
551 objset_t *os = zv->zv_objset;
552 char *data = (char *)(lr + 1); /* data follows lr_write_t */
553 uint64_t off = lr->lr_offset;
554 uint64_t len = lr->lr_length;
555 dmu_tx_t *tx;
556 int error;
557
558 if (byteswap)
559 byteswap_uint64_array(lr, sizeof (*lr));
560
561 tx = dmu_tx_create(os);
562 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
563 error = dmu_tx_assign(tx, TXG_WAIT);
564 if (error) {
565 dmu_tx_abort(tx);
566 } else {
567 dmu_write(os, ZVOL_OBJ, off, len, data, tx);
568 dmu_tx_commit(tx);
569 }
570
571 return (SET_ERROR(error));
572}
573
574static int
575zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
576{
577 return (SET_ERROR(ENOTSUP));
578}
579
580/*
581 * Callback vectors for replaying records.
582 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
583 */
584zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
585 (zil_replay_func_t)zvol_replay_err, /* no such transaction type */
586 (zil_replay_func_t)zvol_replay_err, /* TX_CREATE */
587 (zil_replay_func_t)zvol_replay_err, /* TX_MKDIR */
588 (zil_replay_func_t)zvol_replay_err, /* TX_MKXATTR */
589 (zil_replay_func_t)zvol_replay_err, /* TX_SYMLINK */
590 (zil_replay_func_t)zvol_replay_err, /* TX_REMOVE */
591 (zil_replay_func_t)zvol_replay_err, /* TX_RMDIR */
592 (zil_replay_func_t)zvol_replay_err, /* TX_LINK */
593 (zil_replay_func_t)zvol_replay_err, /* TX_RENAME */
594 (zil_replay_func_t)zvol_replay_write, /* TX_WRITE */
595 (zil_replay_func_t)zvol_replay_truncate, /* TX_TRUNCATE */
596 (zil_replay_func_t)zvol_replay_err, /* TX_SETATTR */
597 (zil_replay_func_t)zvol_replay_err, /* TX_ACL */
598};
599
600/*
601 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
602 *
603 * We store data in the log buffers if it's small enough.
604 * Otherwise we will later flush the data out via dmu_sync().
605 */
606ssize_t zvol_immediate_write_sz = 32768;
607
608static void
609zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
610 uint64_t size, int sync)
611{
612 uint32_t blocksize = zv->zv_volblocksize;
613 zilog_t *zilog = zv->zv_zilog;
614 boolean_t slogging;
615 ssize_t immediate_write_sz;
616
617 if (zil_replaying(zilog, tx))
618 return;
619
620 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
621 ? 0 : zvol_immediate_write_sz;
622 slogging = spa_has_slogs(zilog->zl_spa) &&
623 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
624
625 while (size) {
626 itx_t *itx;
627 lr_write_t *lr;
628 ssize_t len;
629 itx_wr_state_t write_state;
630
631 /*
632 * Unlike zfs_log_write() we can be called with
633 * up to DMU_MAX_ACCESS/2 (5MB) writes.
634 */
635 if (blocksize > immediate_write_sz && !slogging &&
636 size >= blocksize && offset % blocksize == 0) {
637 write_state = WR_INDIRECT; /* uses dmu_sync */
638 len = blocksize;
639 } else if (sync) {
640 write_state = WR_COPIED;
641 len = MIN(ZIL_MAX_LOG_DATA, size);
642 } else {
643 write_state = WR_NEED_COPY;
644 len = MIN(ZIL_MAX_LOG_DATA, size);
645 }
646
647 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
648 (write_state == WR_COPIED ? len : 0));
649 lr = (lr_write_t *)&itx->itx_lr;
650 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
651 ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
652 zil_itx_destroy(itx);
653 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
654 lr = (lr_write_t *)&itx->itx_lr;
655 write_state = WR_NEED_COPY;
656 }
657
658 itx->itx_wr_state = write_state;
659 if (write_state == WR_NEED_COPY)
660 itx->itx_sod += len;
661 lr->lr_foid = ZVOL_OBJ;
662 lr->lr_offset = offset;
663 lr->lr_length = len;
664 lr->lr_blkoff = 0;
665 BP_ZERO(&lr->lr_blkptr);
666
667 itx->itx_private = zv;
668 itx->itx_sync = sync;
669
670 (void) zil_itx_assign(zilog, itx, tx);
671
672 offset += len;
673 size -= len;
674 }
675}
676
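/*
 * Summary of the write_state choice made in zvol_log_write() above
 * (illustrative, derived from the code):
 *
 *	aligned whole block, no slog, large blocksize -> WR_INDIRECT
 *	    (data is written via dmu_sync(), the ZIL keeps a pointer)
 *	synchronous write                             -> WR_COPIED
 *	    (data is copied into the itx immediately)
 *	otherwise                                     -> WR_NEED_COPY
 *	    (data is copied only if the itx is actually committed)
 */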
677typedef struct zv_request {
678 zvol_state_t *zv;
679 struct bio *bio;
680 rl_t *rl;
681} zv_request_t;
682
683static void
684uio_from_bio(uio_t *uio, struct bio *bio)
685{
686 uio->uio_bvec = &bio->bi_io_vec[BIO_BI_IDX(bio)];
687 uio->uio_skip = BIO_BI_SKIP(bio);
688 uio->uio_resid = BIO_BI_SIZE(bio);
689 uio->uio_iovcnt = bio->bi_vcnt - BIO_BI_IDX(bio);
690 uio->uio_loffset = BIO_BI_SECTOR(bio) << 9;
691 uio->uio_limit = MAXOFFSET_T;
692 uio->uio_segflg = UIO_BVEC;
693}
694
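/*
 * Worked example for uio_from_bio() above (illustrative): a 12 KiB
 * bio starting at sector 100 yields uio_loffset = 100 << 9 = 51200
 * and uio_resid = 12288; the bvec array and index are borrowed from
 * the bio, so no data is copied.
 */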
695static void
696zvol_write(void *arg)
697{
698 zv_request_t *zvr = arg;
699 struct bio *bio = zvr->bio;
700 uio_t uio;
701 zvol_state_t *zv = zvr->zv;
702 uint64_t volsize = zv->zv_volsize;
703 boolean_t sync;
704 int error = 0;
705 unsigned long start_jif;
706
707 uio_from_bio(&uio, bio);
708
709 ASSERT(zv && zv->zv_open_count > 0);
710
711 start_jif = jiffies;
712 generic_start_io_acct(WRITE, bio_sectors(bio), &zv->zv_disk->part0);
713
714 sync = bio_is_fua(bio) || zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS;
715
716 while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
717 uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
718 uint64_t off = uio.uio_loffset;
719 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
720
721 if (bytes > volsize - off) /* don't write past the end */
722 bytes = volsize - off;
723
724 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
725
726 /* This will only fail for ENOSPC */
727 error = dmu_tx_assign(tx, TXG_WAIT);
728 if (error) {
729 dmu_tx_abort(tx);
730 break;
731 }
732 error = dmu_write_uio_dbuf(zv->zv_dbuf, &uio, bytes, tx);
733 if (error == 0)
734 zvol_log_write(zv, tx, off, bytes, sync);
735 dmu_tx_commit(tx);
736
737 if (error)
738 break;
739 }
740 zfs_range_unlock(zvr->rl);
741 if (sync)
742 zil_commit(zv->zv_zilog, ZVOL_OBJ);
743
744 rw_exit(&zv->zv_suspend_lock);
745 generic_end_io_acct(WRITE, &zv->zv_disk->part0, start_jif);
746 BIO_END_IO(bio, -error);
747 kmem_free(zvr, sizeof (zv_request_t));
748}
749
750/*
751 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
752 */
753static void
754zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
755 boolean_t sync)
756{
757 itx_t *itx;
758 lr_truncate_t *lr;
759 zilog_t *zilog = zv->zv_zilog;
760
761 if (zil_replaying(zilog, tx))
762 return;
763
764 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
765 lr = (lr_truncate_t *)&itx->itx_lr;
766 lr->lr_foid = ZVOL_OBJ;
767 lr->lr_offset = off;
768 lr->lr_length = len;
769
770 itx->itx_sync = sync;
771 zil_itx_assign(zilog, itx, tx);
772}
773
774static void
775zvol_discard(void *arg)
776{
777 zv_request_t *zvr = arg;
778 struct bio *bio = zvr->bio;
779 zvol_state_t *zv = zvr->zv;
780 uint64_t start = BIO_BI_SECTOR(bio) << 9;
781 uint64_t size = BIO_BI_SIZE(bio);
782 uint64_t end = start + size;
783 int error = 0;
784 dmu_tx_t *tx;
785 unsigned long start_jif;
786
787 ASSERT(zv && zv->zv_open_count > 0);
788
789 start_jif = jiffies;
790 generic_start_io_acct(WRITE, bio_sectors(bio), &zv->zv_disk->part0);
791
792 if (end > zv->zv_volsize) {
793 error = SET_ERROR(EIO);
794 goto out;
795 }
796
797 /*
798 * Align the request to volume block boundaries when a secure erase is
799 * not required. This will prevent dnode_free_range() from zeroing out
800 * the unaligned parts which is slow (read-modify-write) and useless
801 * since we are not freeing any space by doing so.
802 */
803 if (!bio_is_secure_erase(bio)) {
804 start = P2ROUNDUP(start, zv->zv_volblocksize);
805 end = P2ALIGN(end, zv->zv_volblocksize);
806 size = end - start;
807 }
808
809 if (start >= end)
810 goto out;
811
812 tx = dmu_tx_create(zv->zv_objset);
813 dmu_tx_mark_netfree(tx);
814 error = dmu_tx_assign(tx, TXG_WAIT);
815 if (error != 0) {
816 dmu_tx_abort(tx);
817 } else {
818 zvol_log_truncate(zv, tx, start, size, B_TRUE);
819 dmu_tx_commit(tx);
820 error = dmu_free_long_range(zv->zv_objset,
821 ZVOL_OBJ, start, size);
822 }
823
824out:
825 zfs_range_unlock(zvr->rl);
826 rw_exit(&zv->zv_suspend_lock);
827 generic_end_io_acct(WRITE, &zv->zv_disk->part0, start_jif);
828 BIO_END_IO(bio, -error);
829 kmem_free(zvr, sizeof (zv_request_t));
830}
831
832static void
833zvol_read(void *arg)
834
835 zv_request_t *zvr = arg;
836 struct bio *bio = zvr->bio;
837 uio_t uio;
838 zvol_state_t *zv = zvr->zv;
839 uint64_t volsize = zv->zv_volsize;
840 int error = 0;
841 unsigned long start_jif;
842
843 uio_from_bio(&uio, bio);
844
845 ASSERT(zv && zv->zv_open_count > 0);
846
847 start_jif = jiffies;
848 generic_start_io_acct(READ, bio_sectors(bio), &zv->zv_disk->part0);
849
850 while (uio.uio_resid > 0 && uio.uio_loffset < volsize) {
851 uint64_t bytes = MIN(uio.uio_resid, DMU_MAX_ACCESS >> 1);
852
853 /* don't read past the end */
854 if (bytes > volsize - uio.uio_loffset)
855 bytes = volsize - uio.uio_loffset;
856
857 error = dmu_read_uio_dbuf(zv->zv_dbuf, &uio, bytes);
858 if (error) {
859 /* convert checksum errors into IO errors */
860 if (error == ECKSUM)
861 error = SET_ERROR(EIO);
862 break;
863 }
864 }
865 zfs_range_unlock(zvr->rl);
866
867 rw_exit(&zv->zv_suspend_lock);
868 generic_end_io_acct(READ, &zv->zv_disk->part0, start_jif);
869 BIO_END_IO(bio, -error);
870 kmem_free(zvr, sizeof (zv_request_t));
871}
872
873static MAKE_REQUEST_FN_RET
874zvol_request(struct request_queue *q, struct bio *bio)
875{
876 zvol_state_t *zv = q->queuedata;
877 fstrans_cookie_t cookie = spl_fstrans_mark();
878 uint64_t offset = BIO_BI_SECTOR(bio) << 9;
879 uint64_t size = BIO_BI_SIZE(bio);
880 int rw = bio_data_dir(bio);
881 zv_request_t *zvr;
882
883 if (bio_has_data(bio) && offset + size > zv->zv_volsize) {
884 printk(KERN_INFO
885 "%s: bad access: offset=%llu, size=%lu\n",
886 zv->zv_disk->disk_name,
887 (long long unsigned)offset,
888 (long unsigned)size);
889
890 BIO_END_IO(bio, -SET_ERROR(EIO));
891 goto out;
892 }
893
894 if (rw == WRITE) {
895 if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
896 BIO_END_IO(bio, -SET_ERROR(EROFS));
897 goto out;
898 }
899
900 /*
901 * To be released in the I/O function. See the comment on
902 * zfs_range_lock below.
903 */
904 rw_enter(&zv->zv_suspend_lock, RW_READER);
905
906 /* bio marked as FLUSH need to flush before write */
907 if (bio_is_flush(bio))
908 zil_commit(zv->zv_zilog, ZVOL_OBJ);
909
910 /* Some requests are just for flush and nothing else. */
911 if (size == 0) {
912 rw_exit(&zv->zv_suspend_lock);
913 BIO_END_IO(bio, 0);
914 goto out;
915 }
916
917 zvr = kmem_alloc(sizeof (zv_request_t), KM_SLEEP);
918 zvr->zv = zv;
919 zvr->bio = bio;
920
921 /*
922 * To be released in the I/O function. Since the I/O functions
923 * are asynchronous, we take it here synchronously to make
924 * sure overlapped I/Os are properly ordered.
925 */
926 zvr->rl = zfs_range_lock(&zv->zv_range_lock, offset, size,
927 RL_WRITER);
928 if (bio_is_discard(bio) || bio_is_secure_erase(bio)) {
929 if (zvol_request_sync || taskq_dispatch(zvol_taskq,
930 zvol_discard, zvr, TQ_SLEEP) == TASKQID_INVALID)
931 zvol_discard(zvr);
932 } else {
933 if (zvol_request_sync || taskq_dispatch(zvol_taskq,
934 zvol_write, zvr, TQ_SLEEP) == TASKQID_INVALID)
935 zvol_write(zvr);
936 }
937 } else {
938 zvr = kmem_alloc(sizeof (zv_request_t), KM_SLEEP);
939 zvr->zv = zv;
940 zvr->bio = bio;
941
942 rw_enter(&zv->zv_suspend_lock, RW_READER);
943
944 zvr->rl = zfs_range_lock(&zv->zv_range_lock, offset, size,
945 RL_READER);
946 if (zvol_request_sync || taskq_dispatch(zvol_taskq,
947 zvol_read, zvr, TQ_SLEEP) == TASKQID_INVALID)
948 zvol_read(zvr);
949 }
950
951out:
952 spl_fstrans_unmark(cookie);
953#ifdef HAVE_MAKE_REQUEST_FN_RET_INT
954 return (0);
955#elif defined(HAVE_MAKE_REQUEST_FN_RET_QC)
956 return (BLK_QC_T_NONE);
957#endif
958}
959
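/*
 * Illustrative note: when the zvol_request_sync tunable is set, or
 * when taskq dispatch fails, the I/O functions above run inline in
 * the submitting context; otherwise they run on zvol_taskq and the
 * range lock and suspend lock taken here are released by the worker.
 */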
960static void
961zvol_get_done(zgd_t *zgd, int error)
962{
963 if (zgd->zgd_db)
964 dmu_buf_rele(zgd->zgd_db, zgd);
965
966 zfs_range_unlock(zgd->zgd_rl);
967
968 if (error == 0 && zgd->zgd_bp)
969 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
970
971 kmem_free(zgd, sizeof (zgd_t));
972}
973
974/*
975 * Get data to generate a TX_WRITE intent log record.
976 */
977static int
978zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
979{
980 zvol_state_t *zv = arg;
981 objset_t *os = zv->zv_objset;
982 uint64_t object = ZVOL_OBJ;
983 uint64_t offset = lr->lr_offset;
984 uint64_t size = lr->lr_length;
985 blkptr_t *bp = &lr->lr_blkptr;
986 dmu_buf_t *db;
987 zgd_t *zgd;
988 int error;
989
990 ASSERT(zio != NULL);
991 ASSERT(size != 0);
992
993 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
994 zgd->zgd_zilog = zv->zv_zilog;
995 zgd->zgd_rl = zfs_range_lock(&zv->zv_range_lock, offset, size,
996 RL_READER);
997
998 /*
999 * Write records come in two flavors: immediate and indirect.
1000 * For small writes it's cheaper to store the data with the
1001 * log record (immediate); for large writes it's cheaper to
1002 * sync the data and get a pointer to it (indirect) so that
1003 * we don't have to write the data twice.
1004 */
1005 if (buf != NULL) { /* immediate write */
1006 error = dmu_read(os, object, offset, size, buf,
1007 DMU_READ_NO_PREFETCH);
1008 } else {
1009 size = zv->zv_volblocksize;
1010 offset = P2ALIGN_TYPED(offset, size, uint64_t);
1011 error = dmu_buf_hold(os, object, offset, zgd, &db,
1012 DMU_READ_NO_PREFETCH);
1013 if (error == 0) {
1014 blkptr_t *obp = dmu_buf_get_blkptr(db);
1015 if (obp) {
1016 ASSERT(BP_IS_HOLE(bp));
1017 *bp = *obp;
1018 }
1019
1020 zgd->zgd_db = db;
1021 zgd->zgd_bp = &lr->lr_blkptr;
1022
1023 ASSERT(db != NULL);
1024 ASSERT(db->db_offset == offset);
1025 ASSERT(db->db_size == size);
1026
1027 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1028 zvol_get_done, zgd);
1029
1030 if (error == 0)
1031 return (0);
1032 }
1033 }
1034
1035 zvol_get_done(zgd, error);
1036
1037 return (SET_ERROR(error));
1038}
1039
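/*
 * Illustrative note on zvol_get_data() above: the buf != NULL path
 * services WR_NEED_COPY records being committed, simply reading the
 * data into the log record, while the buf == NULL path services
 * WR_INDIRECT records: the offset is aligned down to a volblocksize
 * boundary and dmu_sync() writes the block out, leaving only its
 * block pointer in the ZIL.
 */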
1040/*
1041 * The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
1042 */
1043static void
1044zvol_insert(zvol_state_t *zv)
1045{
1046 ASSERT(MUTEX_HELD(&zvol_state_lock));
1047 ASSERT3U(MINOR(zv->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
1048 list_insert_head(&zvol_state_list, zv);
1049 hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
1050}
1051
1052/*
1053 * Simply remove the zvol from the list of zvols.
1054 */
1055static void
1056zvol_remove(zvol_state_t *zv)
1057{
1058 ASSERT(MUTEX_HELD(&zvol_state_lock));
1059 list_remove(&zvol_state_list, zv);
1060 hlist_del(&zv->zv_hlink);
1061}
1062
1063/*
1064 * Set up zv after we have just taken ownership of zv->zv_objset.
1065 */
1066static int
1067zvol_setup_zv(zvol_state_t *zv)
1068{
1069 uint64_t volsize;
1070 int error;
1071 uint64_t ro;
1072 objset_t *os = zv->zv_objset;
1073
1074 error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
1075 if (error)
1076 return (SET_ERROR(error));
1077
1078 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1079 if (error)
1080 return (SET_ERROR(error));
1081
1082 error = dmu_bonus_hold(os, ZVOL_OBJ, zv, &zv->zv_dbuf);
1083 if (error)
1084 return (SET_ERROR(error));
1085
1086 set_capacity(zv->zv_disk, volsize >> 9);
1087 zv->zv_volsize = volsize;
1088 zv->zv_zilog = zil_open(os, zvol_get_data);
1089
1090 if (ro || dmu_objset_is_snapshot(os) ||
1091 !spa_writeable(dmu_objset_spa(os))) {
1092 set_disk_ro(zv->zv_disk, 1);
1093 zv->zv_flags |= ZVOL_RDONLY;
1094 } else {
1095 set_disk_ro(zv->zv_disk, 0);
1096 zv->zv_flags &= ~ZVOL_RDONLY;
1097 }
1098 return (0);
1099}
1100
1101/*
1102 * Shut down everything related to zv_objset except zv_objset itself.
1103 * This is the reverse of zvol_setup_zv.
1104 */
1105static void
1106zvol_shutdown_zv(zvol_state_t *zv)
1107{
1108 zil_close(zv->zv_zilog);
1109 zv->zv_zilog = NULL;
1110
1111 dmu_buf_rele(zv->zv_dbuf, zv);
1112 zv->zv_dbuf = NULL;
1113
1114 /*
1115 * Evict cached data
1116 */
1117 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
1118 !(zv->zv_flags & ZVOL_RDONLY))
1119 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1120 (void) dmu_objset_evict_dbufs(zv->zv_objset);
1121}
1122
1123/*
1124 * return the proper tag for rollback and recv
1125 */
1126void *
1127zvol_tag(zvol_state_t *zv)
1128{
1129 ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
1130 return (zv->zv_open_count > 0 ? zv : NULL);
1131}
1132
1133/*
1134 * Suspend the zvol for recv and rollback.
1135 */
1136zvol_state_t *
1137zvol_suspend(const char *name)
1138{
1139 zvol_state_t *zv;
1140
1141 mutex_enter(&zvol_state_lock);
1142 zv = zvol_find_by_name(name);
1143 if (zv == NULL) {
1144 mutex_exit(&zvol_state_lock);
1145 return (NULL);
1146 }
1147 mutex_enter(&zv->zv_state_lock);
1148 mutex_exit(&zvol_state_lock);
1149
1150 /* block all I/O, release in zvol_resume. */
1151 rw_enter(&zv->zv_suspend_lock, RW_WRITER);
1152
1153 atomic_inc(&zv->zv_suspend_ref);
1154
1155 if (zv->zv_open_count > 0)
1156 zvol_shutdown_zv(zv);
1157
1158 mutex_exit(&zv->zv_state_lock);
1159 return (zv);
1160}
1161
1162int
1163zvol_resume(zvol_state_t *zv)
1164{
1165 int error = 0;
1166
1167 ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
1168
1169 /*
1170 * Cannot take zv_state_lock here with zv_suspend_lock
1171 * held; however, the latter is held in exclusive mode,
1172 * so it is not necessary to do so
1173 */
1174 if (zv->zv_open_count > 0) {
1175 VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
1176 VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
1177 VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
1178 dmu_objset_rele(zv->zv_objset, zv);
1179
1180 error = zvol_setup_zv(zv);
1181 }
1182 rw_exit(&zv->zv_suspend_lock);
1183 /*
1184 * We need this because we don't hold zvol_state_lock while releasing
1185 * zv_suspend_lock. zvol_remove_minors_impl thus cannot check
1186 * zv_suspend_lock to determine it is safe to free because rwlock is
1187 * not inherently atomic.
1188 */
1189 atomic_dec(&zv->zv_suspend_ref);
1190
1191 return (SET_ERROR(error));
1192}
1193
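/*
 * Illustrative usage sketch (an assumption, mirroring the recv and
 * rollback callers): zvol_suspend() blocks all I/O on the volume
 * until the matching zvol_resume():
 *
 *	zvol_state_t *zv = zvol_suspend("pool/vol");
 *	...receive into or roll back the dataset...
 *	if (zv != NULL)
 *		(void) zvol_resume(zv);
 */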
1194static int
1195zvol_first_open(zvol_state_t *zv)
1196{
1197 objset_t *os;
1198 int error, locked = 0;
1199
1200 /*
1201 * In all other cases the spa_namespace_lock is taken before the
1202 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
1203 * function calls fops->open() with the bdev->bd_mutex lock held.
1204 * This deadlock can be easily observed with zvols used as vdevs.
1205 *
1206 * To avoid a potential lock inversion deadlock we preemptively
1207 * try to take the spa_namespace_lock(). Normally it will not
1208 * be contended and this is safe because spa_open_common() handles
1209 * the case where the caller already holds the spa_namespace_lock.
1210 *
1211 * When it is contended we risk a lock inversion if we were to
1212 * block waiting for the lock. Luckily, the __blkdev_get()
1213 * function allows us to return -ERESTARTSYS which will result in
1214 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
1215 * called again. This process can be repeated safely until both
1216 * locks are acquired.
1217 */
1218 if (!mutex_owned(&spa_namespace_lock)) {
1219 locked = mutex_tryenter(&spa_namespace_lock);
1220 if (!locked)
1221 return (-SET_ERROR(ERESTARTSYS));
1222 }
1223
1224 /* lie and say we're read-only */
1225 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zv, &os);
1226 if (error)
1227 goto out_mutex;
1228
1229 zv->zv_objset = os;
1230
1231 error = zvol_setup_zv(zv);
1232
1233 if (error) {
1234 dmu_objset_disown(os, zv);
1235 zv->zv_objset = NULL;
1236 }
1237
1238out_mutex:
1239 if (locked)
1240 mutex_exit(&spa_namespace_lock);
1241 return (SET_ERROR(-error));
1242}
1243
1244static void
1245zvol_last_close(zvol_state_t *zv)
1246{
1247 zvol_shutdown_zv(zv);
1248
1249 dmu_objset_disown(zv->zv_objset, zv);
60101509
BB
1250 zv->zv_objset = NULL;
1251}
1252
1253static int
1254zvol_open(struct block_device *bdev, fmode_t flag)
1255{
1256 zvol_state_t *zv;
1257 int error = 0, drop_suspend = 0;
1258
1259 ASSERT(!mutex_owned(&zvol_state_lock));
1260
1261 mutex_enter(&zvol_state_lock);
1262 /*
1263 * Obtain a copy of private_data under the lock to make sure
1264 * that either the result of zvol free code path setting
1265 * bdev->bd_disk->private_data to NULL is observed, or zvol_free()
1266 * is not called on this zv because of the positive zv_open_count.
1267 */
1268 zv = bdev->bd_disk->private_data;
1269 if (zv == NULL) {
1270 mutex_exit(&zvol_state_lock);
1271 return (SET_ERROR(-ENXIO));
1272 }
1273 mutex_enter(&zv->zv_state_lock);
1274 mutex_exit(&zvol_state_lock);
1275
1276 if (zv->zv_open_count == 0) {
1277 /* make sure zvol is not suspended when first open */
1278 rw_enter(&zv->zv_suspend_lock, RW_READER);
1279 drop_suspend = 1;
1280
1281 error = zvol_first_open(zv);
1282 if (error)
1283 goto out_mutex;
1284 }
1285
1286 if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1287 error = -EROFS;
1288 goto out_open_count;
1289 }
1290
1291 zv->zv_open_count++;
1292
1293 check_disk_change(bdev);
1294
1295out_open_count:
1296 if (zv->zv_open_count == 0)
1297 zvol_last_close(zv);
1298out_mutex:
1299 if (drop_suspend)
1300 rw_exit(&zv->zv_suspend_lock);
1301 mutex_exit(&zv->zv_state_lock);
1302
1303 return (SET_ERROR(error));
1304}
1305
1306#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1307static void
1308#else
1309static int
1310#endif
1311zvol_release(struct gendisk *disk, fmode_t mode)
1312{
1313 zvol_state_t *zv;
1314
1315 ASSERT(!mutex_owned(&zvol_state_lock));
1316
1317 mutex_enter(&zvol_state_lock);
1318 zv = disk->private_data;
1319 ASSERT(zv && zv->zv_open_count > 0);
1320 mutex_enter(&zv->zv_state_lock);
1321 mutex_exit(&zvol_state_lock);
1322
1323 /* make sure zvol is not suspended when last close */
1324 if (zv->zv_open_count == 1)
1325 rw_enter(&zv->zv_suspend_lock, RW_READER);
1326
1327 zv->zv_open_count--;
1328 if (zv->zv_open_count == 0) {
1329 zvol_last_close(zv);
1330 rw_exit(&zv->zv_suspend_lock);
1331 }
1332
1333 mutex_exit(&zv->zv_state_lock);
1334
1335#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1336 return (0);
1337#endif
1338}
1339
1340static int
1341zvol_ioctl(struct block_device *bdev, fmode_t mode,
1342 unsigned int cmd, unsigned long arg)
1343{
1344 zvol_state_t *zv = bdev->bd_disk->private_data;
1345 int error = 0;
1346
1347 ASSERT(zv && zv->zv_open_count > 0);
1348
1349 switch (cmd) {
1350 case BLKFLSBUF:
1351 fsync_bdev(bdev);
1352 invalidate_bdev(bdev);
1353 rw_enter(&zv->zv_suspend_lock, RW_READER);
1354
1355 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
1356 !(zv->zv_flags & ZVOL_RDONLY))
1357 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1358
1359 rw_exit(&zv->zv_suspend_lock);
1360 break;
1361
1362 case BLKZNAME:
1363 mutex_enter(&zv->zv_state_lock);
1364 error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
1365 mutex_exit(&zv->zv_state_lock);
1366 break;
1367
1368 default:
1369 error = -ENOTTY;
1370 break;
1371 }
1372
1373 return (SET_ERROR(error));
1374}
1375
1376#ifdef CONFIG_COMPAT
1377static int
1378zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
1379 unsigned cmd, unsigned long arg)
1380{
1381 return (zvol_ioctl(bdev, mode, cmd, arg));
1382}
1383#else
1384#define zvol_compat_ioctl NULL
1385#endif
1386
1387static int zvol_media_changed(struct gendisk *disk)
1388{
1389 zvol_state_t *zv = disk->private_data;
1390
1391 ASSERT(zv && zv->zv_open_count > 0);
1392
1393 return (zv->zv_changed);
1394}
1395
1396static int zvol_revalidate_disk(struct gendisk *disk)
1397{
1398 zvol_state_t *zv = disk->private_data;
1399
1400 ASSERT(zv && zv->zv_open_count > 0);
1401
1402 zv->zv_changed = 0;
1403 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1404
1405 return (0);
1406}
1407
1408/*
1409 * Provide a simple virtual geometry for legacy compatibility. For devices
1410 * smaller than 1 MiB a small head and sector count is used to allow very
1411 * tiny devices. For devices over 1 MiB a standard head and sector count
1412 * is used to keep the cylinders count reasonable.
1413 */
1414static int
1415zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1416{
1417 zvol_state_t *zv = bdev->bd_disk->private_data;
1418 sector_t sectors;
1419
1420 ASSERT(zv && zv->zv_open_count > 0);
1421
1422 sectors = get_capacity(zv->zv_disk);
1423
1424 if (sectors > 2048) {
1425 geo->heads = 16;
1426 geo->sectors = 63;
1427 } else {
1428 geo->heads = 2;
1429 geo->sectors = 4;
1430 }
1431
1432 geo->start = 0;
1433 geo->cylinders = sectors / (geo->heads * geo->sectors);
1434
1435 return (0);
1436}
1437
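/*
 * Worked example (illustrative): a 100 MiB zvol has 204800 512-byte
 * sectors, which exceeds the 2048-sector threshold, so the fake
 * geometry is heads = 16, sectors = 63, and
 * cylinders = 204800 / (16 * 63) = 203.
 */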
1438static struct kobject *
1439zvol_probe(dev_t dev, int *part, void *arg)
1440{
1441 zvol_state_t *zv;
1442 struct kobject *kobj;
1443
1444 mutex_enter(&zvol_state_lock);
1445 zv = zvol_find_by_dev(dev);
1446 kobj = zv ? get_disk(zv->zv_disk) : NULL;
1447 mutex_exit(&zvol_state_lock);
1448
1449 return (kobj);
1450}
1451
1452#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
1453static struct block_device_operations zvol_ops = {
1454 .open = zvol_open,
1455 .release = zvol_release,
1456 .ioctl = zvol_ioctl,
1457 .compat_ioctl = zvol_compat_ioctl,
1458 .media_changed = zvol_media_changed,
1459 .revalidate_disk = zvol_revalidate_disk,
1460 .getgeo = zvol_getgeo,
1461 .owner = THIS_MODULE,
1462};
1463
1464#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1465
1466static int
1467zvol_open_by_inode(struct inode *inode, struct file *file)
1468{
1469 return (zvol_open(inode->i_bdev, file->f_mode));
1470}
1471
1472static int
1473zvol_release_by_inode(struct inode *inode, struct file *file)
1474{
1475 return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
1476}
1477
1478static int
1479zvol_ioctl_by_inode(struct inode *inode, struct file *file,
1480 unsigned int cmd, unsigned long arg)
1481{
1482 if (file == NULL || inode == NULL)
1483 return (SET_ERROR(-EINVAL));
1484
1485 return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
1486}
1487
1488#ifdef CONFIG_COMPAT
1489static long
1490zvol_compat_ioctl_by_inode(struct file *file,
1491 unsigned int cmd, unsigned long arg)
1492{
1493 if (file == NULL)
1494 return (SET_ERROR(-EINVAL));
1495
1496 return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
1497 file->f_mode, cmd, arg));
1498}
1499#else
1500#define zvol_compat_ioctl_by_inode NULL
1501#endif
1502
1503static struct block_device_operations zvol_ops = {
1504 .open = zvol_open_by_inode,
1505 .release = zvol_release_by_inode,
1506 .ioctl = zvol_ioctl_by_inode,
1507 .compat_ioctl = zvol_compat_ioctl_by_inode,
1508 .media_changed = zvol_media_changed,
1509 .revalidate_disk = zvol_revalidate_disk,
1510 .getgeo = zvol_getgeo,
1511 .owner = THIS_MODULE,
1512};
1513#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1514
1515/*
1516 * Allocate memory for a new zvol_state_t and setup the required
1517 * request queue and generic disk structures for the block device.
1518 */
1519static zvol_state_t *
1520zvol_alloc(dev_t dev, const char *name)
1521{
1522 zvol_state_t *zv;
1523
1524 zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
1525
1526 list_link_init(&zv->zv_next);
1527
1528 mutex_init(&zv->zv_state_lock, NULL, MUTEX_DEFAULT, NULL);
1529
1530 zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
1531 if (zv->zv_queue == NULL)
1532 goto out_kmem;
1533
1534 blk_queue_make_request(zv->zv_queue, zvol_request);
1535 blk_queue_set_write_cache(zv->zv_queue, B_TRUE, B_TRUE);
1536
1537 /* Limit read-ahead to a single page to prevent over-prefetching. */
1538 blk_queue_set_read_ahead(zv->zv_queue, 1);
1539
1540 /* Disable write merging in favor of the ZIO pipeline. */
1541 queue_flag_set(QUEUE_FLAG_NOMERGES, zv->zv_queue);
1542
1543 zv->zv_disk = alloc_disk(ZVOL_MINORS);
1544 if (zv->zv_disk == NULL)
1545 goto out_queue;
1546
1547 zv->zv_queue->queuedata = zv;
1548 zv->zv_dev = dev;
1549 zv->zv_open_count = 0;
1550 strlcpy(zv->zv_name, name, MAXNAMELEN);
1551
1552 zfs_rlock_init(&zv->zv_range_lock);
1553 rw_init(&zv->zv_suspend_lock, NULL, RW_DEFAULT, NULL);
1554
1555 zv->zv_disk->major = zvol_major;
1556 zv->zv_disk->first_minor = (dev & MINORMASK);
1557 zv->zv_disk->fops = &zvol_ops;
1558 zv->zv_disk->private_data = zv;
1559 zv->zv_disk->queue = zv->zv_queue;
1560 snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
1561 ZVOL_DEV_NAME, (dev & MINORMASK));
1562
1563 return (zv);
1564
1565out_queue:
1566 blk_cleanup_queue(zv->zv_queue);
1567out_kmem:
1568 kmem_free(zv, sizeof (zvol_state_t));
1569
1570 return (NULL);
1571}
1572
1573/*
1574 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
1575 * At this time, the structure is not opened by anyone, is taken off
1576 * the zvol_state_list, and has its private data set to NULL.
1577 * The zvol_state_lock is dropped.
1578 */
1579static void
1580zvol_free(void *arg)
1581{
1582 zvol_state_t *zv = arg;
1583
1584 ASSERT(!MUTEX_HELD(&zvol_state_lock));
1585 ASSERT(zv->zv_open_count == 0);
1586 ASSERT(zv->zv_disk->private_data == NULL);
1587
1588 rw_destroy(&zv->zv_suspend_lock);
1589 zfs_rlock_destroy(&zv->zv_range_lock);
1590
1591 del_gendisk(zv->zv_disk);
1592 blk_cleanup_queue(zv->zv_queue);
1593 put_disk(zv->zv_disk);
1594
1595 ida_simple_remove(&zvol_ida, MINOR(zv->zv_dev) >> ZVOL_MINOR_BITS);
1596
1597 mutex_destroy(&zv->zv_state_lock);
1598
1599 kmem_free(zv, sizeof (zvol_state_t));
1600}
1601
1602/*
1603 * Create a block device minor node and setup the linkage between it
1604 * and the specified volume. Once this function returns the block
1605 * device is live and ready for use.
1606 */
1607static int
1608zvol_create_minor_impl(const char *name)
1609{
1610 zvol_state_t *zv;
1611 objset_t *os;
1612 dmu_object_info_t *doi;
1613 uint64_t volsize;
1614 uint64_t len;
1615 unsigned minor = 0;
1616 int error = 0;
1617 int idx;
1618 uint64_t hash = zvol_name_hash(name);
1619
1620 idx = ida_simple_get(&zvol_ida, 0, 0, kmem_flags_convert(KM_SLEEP));
1621 if (idx < 0)
1622 return (SET_ERROR(-idx));
1623 minor = idx << ZVOL_MINOR_BITS;
1624
1625 mutex_enter(&zvol_state_lock);
1626
1627 zv = zvol_find_by_name_hash(name, hash);
1628 if (zv) {
1629 mutex_exit(&zvol_state_lock);
1630 ida_simple_remove(&zvol_ida, idx);
1631 return (SET_ERROR(EEXIST));
1632 }
1633
1634 mutex_exit(&zvol_state_lock);
1635
1636 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
1637
1638 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
1639 if (error)
1640 goto out_doi;
1641
1642 error = dmu_object_info(os, ZVOL_OBJ, doi);
1643 if (error)
1644 goto out_dmu_objset_disown;
1645
1646 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1647 if (error)
1648 goto out_dmu_objset_disown;
1649
1650 zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1651 if (zv == NULL) {
1652 error = SET_ERROR(EAGAIN);
1653 goto out_dmu_objset_disown;
1654 }
1655 zv->zv_hash = hash;
1656
1657 if (dmu_objset_is_snapshot(os))
1658 zv->zv_flags |= ZVOL_RDONLY;
1659
1660 zv->zv_volblocksize = doi->doi_data_block_size;
1661 zv->zv_volsize = volsize;
1662 zv->zv_objset = os;
1663
1664 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1665
1666 blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9);
1667 blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
1668 blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
1669 blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
1670 blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
1671 blk_queue_max_discard_sectors(zv->zv_queue,
1672 (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
1673 blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
1674 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
1675#ifdef QUEUE_FLAG_NONROT
1676 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
1677#endif
1678#ifdef QUEUE_FLAG_ADD_RANDOM
1679 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue);
1680#endif
1681
1682 if (spa_writeable(dmu_objset_spa(os))) {
1683 if (zil_replay_disable)
1684 zil_destroy(dmu_objset_zil(os), B_FALSE);
1685 else
1686 zil_replay(os, zv, zvol_replay_vector);
1687 }
1688
1689 /*
1690 * When udev detects the addition of the device it will immediately
1691 * invoke blkid(8) to determine the type of content on the device.
1692 * Prefetching the blocks commonly scanned by blkid(8) will speed
1693 * up this process.
1694 */
1695 len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
1696 if (len > 0) {
1697 dmu_prefetch(os, ZVOL_OBJ, 0, 0, len, ZIO_PRIORITY_SYNC_READ);
1698 dmu_prefetch(os, ZVOL_OBJ, 0, volsize - len, len,
1699 ZIO_PRIORITY_SYNC_READ);
1700 }
1701
1702 zv->zv_objset = NULL;
1703out_dmu_objset_disown:
1704 dmu_objset_disown(os, FTAG);
1705out_doi:
1706 kmem_free(doi, sizeof (dmu_object_info_t));
1707
1708 if (error == 0) {
1709 mutex_enter(&zvol_state_lock);
1710 zvol_insert(zv);
1711 mutex_exit(&zvol_state_lock);
1712 add_disk(zv->zv_disk);
1713 } else {
1714 ida_simple_remove(&zvol_ida, idx);
1715 }
1716
1717 return (SET_ERROR(error));
1718}
1719
1720/*
1721 * Rename a block device minor node for the specified volume.
1722 */
1723static void
1724zvol_rename_minor(zvol_state_t *zv, const char *newname)
1725{
1726 int readonly = get_disk_ro(zv->zv_disk);
1727
1728 ASSERT(MUTEX_HELD(&zvol_state_lock));
1729
1730 rw_enter(&zv->zv_suspend_lock, RW_READER);
1731 strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
1732 rw_exit(&zv->zv_suspend_lock);
1733
1734 /* move to new hashtable entry */
1735 zv->zv_hash = zvol_name_hash(zv->zv_name);
1736 hlist_del(&zv->zv_hlink);
1737 hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
1738
1739 /*
1740 * The block device's read-only state is briefly changed causing
1741 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
1742 * the name change and fixes the symlinks. This does not change
1743 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
1744 * changes. This would normally be done using kobject_uevent() but
1745 * that is a GPL-only symbol which is why we need this workaround.
1746 */
1747 set_disk_ro(zv->zv_disk, !readonly);
1748 set_disk_ro(zv->zv_disk, readonly);
1749}
1750
1751typedef struct minors_job {
1752 list_t *list;
1753 list_node_t link;
1754 /* input */
1755 char *name;
1756 /* output */
1757 int error;
1758} minors_job_t;
1759
1760/*
1761 * Prefetch zvol dnodes for the minors_job
1762 */
1763static void
1764zvol_prefetch_minors_impl(void *arg)
1765{
1766 minors_job_t *job = arg;
1767 char *dsname = job->name;
1768 objset_t *os = NULL;
1769
1770 job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, FTAG,
1771 &os);
1772 if (job->error == 0) {
1773 dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
1774 dmu_objset_disown(os, FTAG);
1775 }
1776}
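/*
 * Illustrative sketch of how the callbacks below queue prefetch work
 * (derived from the code): each matching dataset gets a minors_job_t
 * placed on the list and dispatched to system_taskq, so dnode
 * prefetch runs in parallel with the dmu_objset_find() traversal:
 *
 *	job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
 *	job->name = strdup(dsname);
 *	job->list = minors_list;
 *	job->error = 0;
 *	list_insert_tail(minors_list, job);
 *	(void) taskq_dispatch(system_taskq, zvol_prefetch_minors_impl,
 *	    job, TQ_SLEEP);
 */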
1777
1778/*
1779 * Mask errors to continue dmu_objset_find() traversal
1780 */
1781static int
1782zvol_create_snap_minor_cb(const char *dsname, void *arg)
1783{
1784 minors_job_t *j = arg;
1785 list_t *minors_list = j->list;
1786 const char *name = j->name;
1787
1788 ASSERT0(MUTEX_HELD(&spa_namespace_lock));
1789
1790 /* skip the designated dataset */
1791 if (name && strcmp(dsname, name) == 0)
1792 return (0);
1793
1794 /* at this point, the dsname should name a snapshot */
1795 if (strchr(dsname, '@') == 0) {
1796 dprintf("zvol_create_snap_minor_cb(): "
1797 "%s is not a snapshot name\n", dsname);
1798 } else {
1799 minors_job_t *job;
1800 char *n = strdup(dsname);
1801 if (n == NULL)
1802 return (0);
1803
1804 job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
1805 job->name = n;
1806 job->list = minors_list;
1807 job->error = 0;
1808 list_insert_tail(minors_list, job);
1809 /* dispatch failure is harmless: job->error stays 0, so the minor is still created */
1810 taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
1811 TQ_SLEEP);
a0bd735a
BP
1812 }
1813
1814 return (0);
1815}
1816
1817/*
1818 * Mask errors to continue dmu_objset_find() traversal
1819 */
60101509 1820static int
13fe0198 1821zvol_create_minors_cb(const char *dsname, void *arg)
60101509 1822{
a0bd735a
BP
1823 uint64_t snapdev;
1824 int error;
7ac557ce 1825 list_t *minors_list = arg;
a0bd735a 1826
1ee159f4
BP
1827 ASSERT0(MUTEX_HELD(&spa_namespace_lock));
1828
a0bd735a
BP
1829 error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
1830 if (error)
1831 return (0);
1832
1833 /*
1834 * Given the name and the 'snapdev' property, create device minor nodes
1835 * with the linkages to zvols/snapshots as needed.
1836 * If the name represents a zvol, create a minor node for the zvol, then
1837 * check if its snapshots are 'visible', and if so, iterate over the
1838 * snapshots and create device minor nodes for those.
1839 */
1840 if (strchr(dsname, '@') == 0) {
7ac557ce
CC
1841 minors_job_t *job;
1842 char *n = strdup(dsname);
1843 if (n == NULL)
1844 return (0);
1845
1846 job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
1847 job->name = n;
1848 job->list = minors_list;
1849 job->error = 0;
1850 list_insert_tail(minors_list, job);
1851 /* dispatch failure is harmless: job->error stays 0, so the minor is still created */
1852 taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
1853 TQ_SLEEP);
1854
1855 if (snapdev == ZFS_SNAPDEV_VISIBLE) {
a0bd735a
BP
1856 /*
1857 * traverse snapshots only, do not traverse children,
1858 * and skip the 'dsname'
1859 */
1860 error = dmu_objset_find((char *)dsname,
7ac557ce 1861 zvol_create_snap_minor_cb, (void *)job,
a0bd735a 1862 DS_FIND_SNAPSHOTS);
a0bd735a
BP
1863 }
1864 } else {
1865 dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
02730c33 1866 dsname);
a0bd735a 1867 }
60101509 1868
d5674448 1869 return (0);
60101509
BB
1870}
1871
1872/*
a0bd735a
BP
1873 * Create minors for the specified dataset, including children and snapshots.
1874 * Pay attention to the 'snapdev' property and iterate over the snapshots
1875 * only if they are 'visible'. This approach ensures that the snapshot
1876 * metadata is read from disk only when it is needed.
1877 *
1878 * The name can represent a dataset to be recursively scanned for zvols and
1879 * their snapshots, or a single zvol snapshot. If the name represents a
1880 * dataset, the scan is performed in two nested stages:
1881 * - scan the dataset for zvols, and
1882 * - for each zvol, create a minor node, then check if the zvol's snapshots
1883 * are 'visible', and only then iterate over the snapshots if needed
1884 *
4e33ba4c 1885 * If the name represents a snapshot, we check whether the snapshot is
a0bd735a
BP
1886 * 'visible' (which also verifies that the parent is a zvol), and if so,
1887 * a minor node for that snapshot is created.
60101509 1888 */
a0bd735a
BP
1889static int
1890zvol_create_minors_impl(const char *name)
60101509 1891{
60101509 1892 int error = 0;
5428dc51 1893 fstrans_cookie_t cookie;
a0bd735a 1894 char *atp, *parent;
7ac557ce
CC
1895 list_t minors_list;
1896 minors_job_t *job;
60101509 1897
5428dc51
BP
1898 if (zvol_inhibit_dev)
1899 return (0);
1900
7ac557ce
CC
1901 /*
1902 * This is the list for prefetch jobs. Whenever we find a match
1903 * during dmu_objset_find(), we insert a minors_job into the list and
1904 * dispatch a taskq job to prefetch the zvol dnodes in parallel. No
1905 * locking is needed; all list operations run on the current thread.
1906 *
1907 * We use this list to run zvol_create_minor_impl() after the prefetch,
1908 * so we don't have to traverse using dmu_objset_find() again.
1909 */
1910 list_create(&minors_list, sizeof (minors_job_t),
1911 offsetof(minors_job_t, link));
1912
a0bd735a
BP
1913 parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1914 (void) strlcpy(parent, name, MAXPATHLEN);
1915
1916 if ((atp = strrchr(parent, '@')) != NULL) {
1917 uint64_t snapdev;
1918
1919 *atp = '\0';
1920 error = dsl_prop_get_integer(parent, "snapdev",
1921 &snapdev, NULL);
1922
1923 if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
1924 error = zvol_create_minor_impl(name);
1925 } else {
1926 cookie = spl_fstrans_mark();
1927 error = dmu_objset_find(parent, zvol_create_minors_cb,
7ac557ce 1928 &minors_list, DS_FIND_CHILDREN);
a0bd735a
BP
1929 spl_fstrans_unmark(cookie);
1930 }
1931
1932 kmem_free(parent, MAXPATHLEN);
7ac557ce
CC
1933 taskq_wait_outstanding(system_taskq, 0);
1934
1935 /*
1936 * Prefetch is completed, we can do zvol_create_minor_impl
1937 * sequentially.
1938 */
1939 while ((job = list_head(&minors_list)) != NULL) {
1940 list_remove(&minors_list, job);
1941 if (!job->error)
1942 zvol_create_minor_impl(job->name);
1943 strfree(job->name);
1944 kmem_free(job, sizeof (minors_job_t));
1945 }
1946
1947 list_destroy(&minors_list);
ba6a2402
BB
1948
1949 return (SET_ERROR(error));
1950}
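/*
 * A worked example of the two-stage scan above, using a hypothetical pool
 * "tank" holding zvol "tank/vol" with snapshot "tank/vol@s1":
 * - zvol_create_minors_impl("tank") finds "tank/vol" through
 *   dmu_objset_find() and queues a prefetch job for it;
 * - if that zvol's snapdev property is 'visible', "tank/vol@s1" is found
 *   by zvol_create_snap_minor_cb() and a second job is queued;
 * - once taskq_wait_outstanding() returns, zvol_create_minor_impl() runs
 *   sequentially for both names, creating their device minor nodes.
 */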
1951
1952/*
1953 * Remove minors for the specified dataset, including children and snapshots.
1954 */
a0bd735a
BP
1955static void
1956zvol_remove_minors_impl(const char *name)
ba6a2402
BB
1957{
1958 zvol_state_t *zv, *zv_next;
1959 int namelen = ((name) ? strlen(name) : 0);
899662e3 1960 taskqid_t t, tid = TASKQID_INVALID;
5559ba09 1961 list_t free_list;
ba6a2402 1962
74497b7a 1963 if (zvol_inhibit_dev)
ba6a2402 1964 return;
74497b7a 1965
5559ba09
BP
1966 list_create(&free_list, sizeof (zvol_state_t),
1967 offsetof(zvol_state_t, zv_next));
1968
60101509 1969 mutex_enter(&zvol_state_lock);
ba6a2402
BB
1970
1971 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1972 zv_next = list_next(&zvol_state_list, zv);
1973
1974 if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
1975 (strncmp(zv->zv_name, name, namelen) == 0 &&
5428dc51
BP
1976 (zv->zv_name[namelen] == '/' ||
1977 zv->zv_name[namelen] == '@'))) {
1978
1979 /* If in use, leave alone */
040dab99
CC
1980 if (zv->zv_open_count > 0 ||
1981 atomic_read(&zv->zv_suspend_ref))
5428dc51 1982 continue;
5559ba09
BP
1983 /*
1984 * By taking zv_state_lock here, we guarantee that no
1985 * one is currently using this zv
1986 */
1987 mutex_enter(&zv->zv_state_lock);
ba6a2402 1988 zvol_remove(zv);
5559ba09 1989 mutex_exit(&zv->zv_state_lock);
899662e3
CC
1990
1991 /* clear this so zvol_open won't open it */
1992 zv->zv_disk->private_data = NULL;
1993
1994 /* try parallel zv_free, if failed do it in place */
5559ba09 1995 t = taskq_dispatch(system_taskq, zvol_free, zv,
899662e3
CC
1996 TQ_SLEEP);
1997 if (t == TASKQID_INVALID)
5559ba09 1998 list_insert_head(&free_list, zv);
899662e3
CC
1999 else
2000 tid = t;
60101509 2001 }
60101509 2002 }
ba6a2402 2003 mutex_exit(&zvol_state_lock);
5559ba09
BP
2004
2005 /*
2006 * Drop zvol_state_lock before calling zvol_free()
2007 */
2008 while ((zv = list_head(&free_list)) != NULL) {
2009 list_remove(&free_list, zv);
2010 zvol_free(zv);
2011 }
2012
899662e3
CC
2013 if (tid != TASKQID_INVALID)
2014 taskq_wait_outstanding(system_taskq, tid);
60101509
BB
2015}
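/*
 * The matching rule above, illustrated with hypothetical names: for
 * name "tank/vol" the loop removes minors for "tank/vol" itself,
 * "tank/vol@s1", and "tank/vol/child", but leaves "tank/volume2" alone,
 * because the character following the common prefix must be '/' or '@'.
 */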
2016
a0bd735a
BP
2017/* Remove minor for this specific snapshot only */
2018static void
2019zvol_remove_minor_impl(const char *name)
2020{
92aceb2a 2021 zvol_state_t *zv = NULL, *zv_next;
a0bd735a
BP
2022
2023 if (zvol_inhibit_dev)
2024 return;
2025
2026 if (strchr(name, '@') == NULL)
2027 return;
2028
2029 mutex_enter(&zvol_state_lock);
2030
2031 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
2032 zv_next = list_next(&zvol_state_list, zv);
2033
2034 if (strcmp(zv->zv_name, name) == 0) {
2035 /* If in use, leave alone */
040dab99
CC
2036 if (zv->zv_open_count > 0 ||
2037 atomic_read(&zv->zv_suspend_ref))
a0bd735a 2038 continue;
5559ba09
BP
2039 /*
2040 * By taking zv_state_lock here, we guarantee that no
2041 * one is currently using this zv
2042 */
2043 mutex_enter(&zv->zv_state_lock);
a0bd735a 2044 zvol_remove(zv);
5559ba09
BP
2045 mutex_exit(&zv->zv_state_lock);
2046 /* clear this so zvol_open won't open it */
2047 zv->zv_disk->private_data = NULL;
a0bd735a
BP
2048 break;
2049 }
2050 }
2051
92aceb2a 2052 /* Drop zvol_state_lock before calling zvol_free() */
a0bd735a 2053 mutex_exit(&zvol_state_lock);
5559ba09 2054
92aceb2a 2055 if (zv != NULL)
2056 zvol_free(zv);
a0bd735a
BP
2057}
2058
60101509 2059/*
ba6a2402 2060 * Rename minors for the specified dataset, including children and snapshots.
60101509 2061 */
a0bd735a
BP
2062static void
2063zvol_rename_minors_impl(const char *oldname, const char *newname)
60101509
BB
2064{
2065 zvol_state_t *zv, *zv_next;
ba6a2402
BB
2066 int oldnamelen, newnamelen;
2067 char *name;
60101509 2068
74497b7a
DH
2069 if (zvol_inhibit_dev)
2070 return;
2071
ba6a2402
BB
2072 oldnamelen = strlen(oldname);
2073 newnamelen = strlen(newname);
79c76d5b 2074 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
60101509
BB
2075
2076 mutex_enter(&zvol_state_lock);
ba6a2402 2077
60101509
BB
2078 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
2079 zv_next = list_next(&zvol_state_list, zv);
2080
5428dc51
BP
2081 /* If in use, leave alone */
2082 if (zv->zv_open_count > 0)
2083 continue;
2084
ba6a2402 2085 if (strcmp(zv->zv_name, oldname) == 0) {
a0bd735a 2086 zvol_rename_minor(zv, newname);
ba6a2402
BB
2087 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
2088 (zv->zv_name[oldnamelen] == '/' ||
2089 zv->zv_name[oldnamelen] == '@')) {
2090 snprintf(name, MAXNAMELEN, "%s%c%s", newname,
2091 zv->zv_name[oldnamelen],
2092 zv->zv_name + oldnamelen + 1);
a0bd735a 2093 zvol_rename_minor(zv, name);
60101509
BB
2094 }
2095 }
ba6a2402 2096
60101509 2097 mutex_exit(&zvol_state_lock);
ba6a2402
BB
2098
2099 kmem_free(name, MAXNAMELEN);
60101509
BB
2100}
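/*
 * The name splicing above, illustrated with hypothetical names: renaming
 * "tank/vol" to "tank/vol2" rewrites the child "tank/vol@s1" as
 * "tank/vol2" + '@' + "s1" = "tank/vol2@s1", reusing the separator
 * character preserved at zv_name[oldnamelen].
 */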
2101
a0bd735a
BP
2102typedef struct zvol_snapdev_cb_arg {
2103 uint64_t snapdev;
2104} zvol_snapdev_cb_arg_t;
2105
0b4d1b58 2106static int
4ea3f864
GM
2107zvol_set_snapdev_cb(const char *dsname, void *param)
2108{
a0bd735a 2109 zvol_snapdev_cb_arg_t *arg = param;
0b4d1b58
ED
2110
2111 if (strchr(dsname, '@') == NULL)
ba6a2402 2112 return (0);
0b4d1b58 2113
a0bd735a 2114 switch (arg->snapdev) {
0b4d1b58 2115 case ZFS_SNAPDEV_VISIBLE:
a0bd735a 2116 (void) zvol_create_minor_impl(dsname);
0b4d1b58
ED
2117 break;
2118 case ZFS_SNAPDEV_HIDDEN:
a0bd735a 2119 (void) zvol_remove_minor_impl(dsname);
0b4d1b58
ED
2120 break;
2121 }
ba6a2402
BB
2122
2123 return (0);
0b4d1b58
ED
2124}
2125
a0bd735a
BP
2126static void
2127zvol_set_snapdev_impl(char *name, uint64_t snapdev)
2128{
2129 zvol_snapdev_cb_arg_t arg = {snapdev};
2130 fstrans_cookie_t cookie = spl_fstrans_mark();
2131 /*
2132 * The zvol_set_snapdev_sync() sets snapdev appropriately
2133 * in the dataset hierarchy. Here, we only scan snapshots.
2134 */
2135 dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
2136 spl_fstrans_unmark(cookie);
2137}
2138
2139static zvol_task_t *
2140zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
2141 uint64_t snapdev)
2142{
2143 zvol_task_t *task;
2144 char *delim;
2145
2146 /* Never allow tasks on hidden names. */
2147 if (name1[0] == '$')
2148 return (NULL);
2149
2150 task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
2151 task->op = op;
2152 task->snapdev = snapdev;
2153 delim = strchr(name1, '/');
2154 strlcpy(task->pool, name1, delim ? (delim - name1 + 1) : MAXNAMELEN);
2155
2156 strlcpy(task->name1, name1, MAXNAMELEN);
2157 if (name2 != NULL)
2158 strlcpy(task->name2, name2, MAXNAMELEN);
2159
2160 return (task);
2161}
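/*
 * Pool-name extraction above, illustrated with a hypothetical name: for
 * name1 = "tank/vol@s1", delim points at the '/', so the first strlcpy()
 * copies (delim - name1 + 1) - 1 = 4 characters and NUL-terminates,
 * leaving task->pool = "tank"; when no '/' is present, the whole name
 * (up to MAXNAMELEN) becomes the pool name.
 */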
2162
2163static void
2164zvol_task_free(zvol_task_t *task)
2165{
2166 kmem_free(task, sizeof (zvol_task_t));
2167}
2168
2169/*
2170 * The worker function, executed asynchronously on the spa_zvol_taskq.
2171 */
2172static void
2173zvol_task_cb(void *param)
2174{
2175 zvol_task_t *task = (zvol_task_t *)param;
2176
2177 switch (task->op) {
2178 case ZVOL_ASYNC_CREATE_MINORS:
2179 (void) zvol_create_minors_impl(task->name1);
2180 break;
2181 case ZVOL_ASYNC_REMOVE_MINORS:
2182 zvol_remove_minors_impl(task->name1);
2183 break;
2184 case ZVOL_ASYNC_RENAME_MINORS:
2185 zvol_rename_minors_impl(task->name1, task->name2);
2186 break;
2187 case ZVOL_ASYNC_SET_SNAPDEV:
2188 zvol_set_snapdev_impl(task->name1, task->snapdev);
2189 break;
2190 default:
2191 VERIFY(0);
2192 break;
2193 }
2194
2195 zvol_task_free(task);
2196}
2197
2198typedef struct zvol_set_snapdev_arg {
2199 const char *zsda_name;
2200 uint64_t zsda_value;
2201 zprop_source_t zsda_source;
2202 dmu_tx_t *zsda_tx;
2203} zvol_set_snapdev_arg_t;
2204
2205/*
2206 * Sanity check the dataset for safe use by the sync task. No additional
2207 * conditions are imposed.
2208 */
2209static int
2210zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
2211{
2212 zvol_set_snapdev_arg_t *zsda = arg;
2213 dsl_pool_t *dp = dmu_tx_pool(tx);
2214 dsl_dir_t *dd;
2215 int error;
2216
2217 error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
2218 if (error != 0)
2219 return (error);
2220
2221 dsl_dir_rele(dd, FTAG);
2222
2223 return (error);
2224}
2225
92aceb2a 2226/* ARGSUSED */
a0bd735a
BP
2227static int
2228zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
2229{
a0bd735a
BP
2230 char dsname[MAXNAMELEN];
2231 zvol_task_t *task;
92aceb2a 2232 uint64_t snapdev;
a0bd735a
BP
2233
2234 dsl_dataset_name(ds, dsname);
92aceb2a 2235 if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
2236 return (0);
2237 task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
a0bd735a
BP
2238 if (task == NULL)
2239 return (0);
2240
2241 (void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
02730c33 2242 task, TQ_SLEEP);
a0bd735a
BP
2243 return (0);
2244}
2245
2246/*
92aceb2a 2247 * Traverse all child datasets and apply snapdev appropriately.
2248 * We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
2249 * dataset and read the effective "snapdev" on every child in the callback
2250 * function: this is because the value is not guaranteed to be the same
2251 * throughout the dataset hierarchy.
a0bd735a
BP
2252 */
2253static void
2254zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
2255{
2256 zvol_set_snapdev_arg_t *zsda = arg;
2257 dsl_pool_t *dp = dmu_tx_pool(tx);
2258 dsl_dir_t *dd;
92aceb2a 2259 dsl_dataset_t *ds;
2260 int error;
a0bd735a
BP
2261
2262 VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
2263 zsda->zsda_tx = tx;
2264
92aceb2a 2265 error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
2266 if (error == 0) {
2267 dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
2268 zsda->zsda_source, sizeof (zsda->zsda_value), 1,
2269 &zsda->zsda_value, zsda->zsda_tx);
2270 dsl_dataset_rele(ds, FTAG);
2271 }
a0bd735a
BP
2272 dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
2273 zsda, DS_FIND_CHILDREN);
2274
2275 dsl_dir_rele(dd, FTAG);
2276}
2277
0b4d1b58 2278int
a0bd735a
BP
2279zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
2280{
2281 zvol_set_snapdev_arg_t zsda;
5428dc51 2282
a0bd735a
BP
2283 zsda.zsda_name = ddname;
2284 zsda.zsda_source = source;
2285 zsda.zsda_value = snapdev;
5428dc51 2286
a0bd735a
BP
2287 return (dsl_sync_task(ddname, zvol_set_snapdev_check,
2288 zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
2289}
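/*
 * A minimal usage sketch with a hypothetical dataset name: exposing all
 * snapshot minors under "tank/vol" by setting the property locally:
 *
 *	(void) zvol_set_snapdev("tank/vol", ZPROP_SRC_LOCAL,
 *	    ZFS_SNAPDEV_VISIBLE);
 */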
2290
2291void
2292zvol_create_minors(spa_t *spa, const char *name, boolean_t async)
2293{
2294 zvol_task_t *task;
2295 taskqid_t id;
2296
2297 task = zvol_task_alloc(ZVOL_ASYNC_CREATE_MINORS, name, NULL, ~0ULL);
2298 if (task == NULL)
2299 return;
2300
2301 id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
48d3eb40 2302 if ((async == B_FALSE) && (id != TASKQID_INVALID))
a0bd735a
BP
2303 taskq_wait_id(spa->spa_zvol_taskq, id);
2304}
2305
2306void
2307zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
2308{
2309 zvol_task_t *task;
2310 taskqid_t id;
2311
2312 task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
2313 if (task == NULL)
2314 return;
5428dc51 2315
a0bd735a 2316 id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
48d3eb40 2317 if ((async == B_FALSE) && (id != TASKQID_INVALID))
a0bd735a
BP
2318 taskq_wait_id(spa->spa_zvol_taskq, id);
2319}
2320
2321void
2322zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
2323 boolean_t async)
2324{
2325 zvol_task_t *task;
2326 taskqid_t id;
2327
2328 task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
2329 if (task == NULL)
2330 return;
2331
2332 id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
48d3eb40 2333 if ((async == B_FALSE) && (id != TASKQID_INVALID))
a0bd735a 2334 taskq_wait_id(spa->spa_zvol_taskq, id);
0b4d1b58
ED
2335}
2336
60101509
BB
2337int
2338zvol_init(void)
2339{
692e55b8 2340 int threads = MIN(MAX(zvol_threads, 1), 1024);
d45e010d 2341 int i, error;
60101509 2342
2a3871d4 2343 list_create(&zvol_state_list, sizeof (zvol_state_t),
ce37ebd2 2344 offsetof(zvol_state_t, zv_next));
2a3871d4 2345 mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
4a5d7f82 2346 ida_init(&zvol_ida);
2a3871d4 2347
692e55b8
CC
2348 zvol_taskq = taskq_create(ZVOL_DRIVER, threads, maxclsyspri,
2349 threads * 2, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
2350 if (zvol_taskq == NULL) {
2351 printk(KERN_INFO "ZFS: taskq_create() failed\n");
2352 error = -ENOMEM;
2353 goto out;
2354 }
2355
d45e010d
CC
2356 zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
2357 KM_SLEEP);
2358 if (!zvol_htable) {
692e55b8
CC
2359 error = -ENOMEM;
2360 goto out_taskq;
d45e010d
CC
2361 }
2362 for (i = 0; i < ZVOL_HT_SIZE; i++)
2363 INIT_HLIST_HEAD(&zvol_htable[i]);
2364
60101509
BB
2365 error = register_blkdev(zvol_major, ZVOL_DRIVER);
2366 if (error) {
2367 printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
d45e010d 2368 goto out_free;
60101509
BB
2369 }
2370
2371 blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
ce37ebd2 2372 THIS_MODULE, zvol_probe, NULL, NULL);
60101509 2373
60101509 2374 return (0);
2a3871d4 2375
d45e010d
CC
2376out_free:
2377 kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
692e55b8
CC
2378out_taskq:
2379 taskq_destroy(zvol_taskq);
37f9dac5 2380out:
2a3871d4
RY
2381 mutex_destroy(&zvol_state_lock);
2382 list_destroy(&zvol_state_list);
2383
ce37ebd2 2384 return (SET_ERROR(error));
60101509
BB
2385}
2386
2387void
2388zvol_fini(void)
2389{
a0bd735a
BP
2390 zvol_remove_minors_impl(NULL);
2391
60101509
BB
2392 blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
2393 unregister_blkdev(zvol_major, ZVOL_DRIVER);
d45e010d 2394 kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
a0bd735a 2395
692e55b8 2396 taskq_destroy(zvol_taskq);
60101509 2397 list_destroy(&zvol_state_list);
a0bd735a 2398 mutex_destroy(&zvol_state_lock);
f2d8bdc6
CC
2399
2400 ida_destroy(&zvol_ida);
60101509
BB
2401}
2402
02730c33 2403/* BEGIN CSTYLED */
74497b7a
DH
2404module_param(zvol_inhibit_dev, uint, 0644);
2405MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
2406
30a9524e 2407module_param(zvol_major, uint, 0444);
60101509
BB
2408MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
2409
692e55b8
CC
2410module_param(zvol_threads, uint, 0444);
2411MODULE_PARM_DESC(zvol_threads, "Max number of threads to handle I/O requests");
2412
2413module_param(zvol_request_sync, uint, 0644);
2414MODULE_PARM_DESC(zvol_request_sync, "Synchronously handle bio requests");
2415
7c0e5708 2416module_param(zvol_max_discard_blocks, ulong, 0444);
ce37ebd2 2417MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
9965059a
BB
2418
2419module_param(zvol_prefetch_bytes, uint, 0644);
2420MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
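/*
 * Example with hypothetical values: the parameters above may be set at
 * module load time, e.g.
 *
 *	modprobe zfs zvol_threads=64 zvol_prefetch_bytes=1048576
 *
 * and the 0644 ones may also be changed at runtime through
 * /sys/module/zfs/parameters/<name>.
 */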
02730c33 2421/* END CSTYLED */