1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 *
27 * ZFS volume emulation driver.
28 *
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
31 *
32 * /dev/<pool_name>/<dataset_name>
33 *
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
36 */
37
38 #include <sys/dbuf.h>
39 #include <sys/dmu_traverse.h>
40 #include <sys/dsl_dataset.h>
41 #include <sys/dsl_prop.h>
42 #include <sys/zap.h>
43 #include <sys/zfeature.h>
44 #include <sys/zil_impl.h>
45 #include <sys/zio.h>
46 #include <sys/zfs_rlock.h>
47 #include <sys/zfs_znode.h>
48 #include <sys/zvol.h>
49 #include <linux/blkdev_compat.h>
50
51 unsigned int zvol_inhibit_dev = 0;
52 unsigned int zvol_major = ZVOL_MAJOR;
53 unsigned int zvol_prefetch_bytes = (128 * 1024);
54 unsigned long zvol_max_discard_blocks = 16384;
55
56 static kmutex_t zvol_state_lock;
57 static list_t zvol_state_list;
58 static char *zvol_tag = "zvol_tag";
59
60 /*
61 * The in-core state of each volume.
62 */
63 typedef struct zvol_state {
64 char zv_name[MAXNAMELEN]; /* name */
65 uint64_t zv_volsize; /* advertised space */
66 uint64_t zv_volblocksize; /* volume block size */
67 objset_t *zv_objset; /* objset handle */
68 uint32_t zv_flags; /* ZVOL_* flags */
69 uint32_t zv_open_count; /* open counts */
70 uint32_t zv_changed; /* disk changed */
71 zilog_t *zv_zilog; /* ZIL handle */
72 znode_t zv_znode; /* for range locking */
73 dmu_buf_t *zv_dbuf; /* bonus handle */
74 dev_t zv_dev; /* device id */
75 struct gendisk *zv_disk; /* generic disk */
76 struct request_queue *zv_queue; /* request queue */
77 spinlock_t zv_lock; /* request queue lock */
78 list_node_t zv_next; /* next zvol_state_t linkage */
79 } zvol_state_t;
80
81 #define ZVOL_RDONLY 0x1
82
83 /*
84 * Find the next available range of ZVOL_MINORS minor numbers. The
85 * zvol_state_list is kept in ascending minor order so we simply need
86 * to scan the list for the first gap in the sequence. This allows us
 * to recycle minor numbers as devices are created and removed.
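 * For example, if ZVOL_MINORS is 16 and the list holds minors 0 and 32,
 * the first free minor returned is 16.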
88 */
89 static int
90 zvol_find_minor(unsigned *minor)
91 {
92 zvol_state_t *zv;
93
94 *minor = 0;
95 ASSERT(MUTEX_HELD(&zvol_state_lock));
96 for (zv = list_head(&zvol_state_list); zv != NULL;
97 zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
98 if (MINOR(zv->zv_dev) != MINOR(*minor))
99 break;
100 }
101
102 /* All minors are in use */
103 if (*minor >= (1 << MINORBITS))
104 return (SET_ERROR(ENXIO));
105
106 return (0);
107 }
108
109 /*
110 * Find a zvol_state_t given the full major+minor dev_t.
111 */
112 static zvol_state_t *
113 zvol_find_by_dev(dev_t dev)
114 {
115 zvol_state_t *zv;
116
117 ASSERT(MUTEX_HELD(&zvol_state_lock));
118 for (zv = list_head(&zvol_state_list); zv != NULL;
119 zv = list_next(&zvol_state_list, zv)) {
120 if (zv->zv_dev == dev)
121 return (zv);
122 }
123
124 return (NULL);
125 }
126
127 /*
128 * Find a zvol_state_t given the name provided at zvol_alloc() time.
129 */
130 static zvol_state_t *
131 zvol_find_by_name(const char *name)
132 {
133 zvol_state_t *zv;
134
135 ASSERT(MUTEX_HELD(&zvol_state_lock));
136 for (zv = list_head(&zvol_state_list); zv != NULL;
137 zv = list_next(&zvol_state_list, zv)) {
138 if (strncmp(zv->zv_name, name, MAXNAMELEN) == 0)
139 return (zv);
140 }
141
142 return (NULL);
143 }
144
146 /*
147 * Given a path, return TRUE if path is a ZVOL.
148 */
149 boolean_t
150 zvol_is_zvol(const char *device)
151 {
152 struct block_device *bdev;
153 unsigned int major;
154
155 bdev = lookup_bdev(device);
156 if (IS_ERR(bdev))
157 return (B_FALSE);
158
159 major = MAJOR(bdev->bd_dev);
160 bdput(bdev);
161
162 if (major == zvol_major)
163 return (B_TRUE);
164
165 return (B_FALSE);
166 }
167
168 /*
169 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
170 */
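/*
 * For example, a "zfs create -V 1G -b 8K tank/vol" style create arrives
 * here with ZFS_PROP_VOLSIZE (and optionally ZFS_PROP_VOLBLOCKSIZE) in
 * zct_props; both are consumed below rather than stored as generic
 * dataset properties.
 */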
171 void
172 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
173 {
174 zfs_creat_t *zct = arg;
175 nvlist_t *nvprops = zct->zct_props;
176 int error;
177 uint64_t volblocksize, volsize;
178
179 VERIFY(nvlist_lookup_uint64(nvprops,
180 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
181 if (nvlist_lookup_uint64(nvprops,
182 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
183 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
184
185 /*
186 * These properties must be removed from the list so the generic
187 * property setting step won't apply to them.
188 */
189 VERIFY(nvlist_remove_all(nvprops,
190 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
191 (void) nvlist_remove_all(nvprops,
192 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
193
194 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
195 DMU_OT_NONE, 0, tx);
196 ASSERT(error == 0);
197
198 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
199 DMU_OT_NONE, 0, tx);
200 ASSERT(error == 0);
201
202 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
203 ASSERT(error == 0);
204 }
205
206 /*
207 * ZFS_IOC_OBJSET_STATS entry point.
208 */
209 int
210 zvol_get_stats(objset_t *os, nvlist_t *nv)
211 {
212 int error;
213 dmu_object_info_t *doi;
214 uint64_t val;
215
216 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
217 if (error)
218 return (SET_ERROR(error));
219
220 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
221 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
222 error = dmu_object_info(os, ZVOL_OBJ, doi);
223
224 if (error == 0) {
225 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
226 doi->doi_data_block_size);
227 }
228
229 kmem_free(doi, sizeof (dmu_object_info_t));
230
231 return (SET_ERROR(error));
232 }
233
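/*
 * Notify the block layer and any watchers (udev, partition scanners)
 * that the capacity of the backing gendisk has changed.
 */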
234 static void
235 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
236 {
237 struct block_device *bdev;
238
239 bdev = bdget_disk(zv->zv_disk, 0);
240 if (bdev == NULL)
241 return;
242 /*
243 * 2.6.28 API change
244 * Added check_disk_size_change() helper function.
245 */
246 #ifdef HAVE_CHECK_DISK_SIZE_CHANGE
247 set_capacity(zv->zv_disk, volsize >> 9);
248 zv->zv_volsize = volsize;
249 check_disk_size_change(zv->zv_disk, bdev);
250 #else
251 zv->zv_volsize = volsize;
252 zv->zv_changed = 1;
253 (void) check_disk_change(bdev);
254 #endif /* HAVE_CHECK_DISK_SIZE_CHANGE */
255
256 bdput(bdev);
257 }
258
259 /*
260 * Sanity check volume size.
261 */
262 int
263 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
264 {
265 if (volsize == 0)
266 return (SET_ERROR(EINVAL));
267
268 if (volsize % blocksize != 0)
269 return (SET_ERROR(EINVAL));
270
271 #ifdef _ILP32
272 if (volsize - 1 > MAXOFFSET_T)
273 return (SET_ERROR(EOVERFLOW));
274 #endif
275 return (0);
276 }
277
278 /*
279 * Ensure the zap is flushed then inform the VFS of the capacity change.
280 */
281 static int
282 zvol_update_volsize(uint64_t volsize, objset_t *os)
283 {
284 dmu_tx_t *tx;
285 int error;
286
287 ASSERT(MUTEX_HELD(&zvol_state_lock));
288
289 tx = dmu_tx_create(os);
290 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
291 error = dmu_tx_assign(tx, TXG_WAIT);
292 if (error) {
293 dmu_tx_abort(tx);
294 return (SET_ERROR(error));
295 }
296
297 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
298 &volsize, tx);
299 dmu_tx_commit(tx);
300
301 if (error == 0)
302 error = dmu_free_long_range(os,
303 ZVOL_OBJ, volsize, DMU_OBJECT_END);
304
305 return (error);
306 }
307
308 static int
309 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
310 {
311 zvol_size_changed(zv, volsize);
312
313 /*
	 * We should post an event here describing the expansion.  However,
	 * the zfs_ereport_post() interface doesn't nicely support posting
	 * events for zvols; it assumes events relate to vdevs or zios.
317 */
318
319 return (0);
320 }
321
322 /*
 * ZFS_PROP_VOLSIZE set entry point.
324 */
325 int
326 zvol_set_volsize(const char *name, uint64_t volsize)
327 {
328 zvol_state_t *zv = NULL;
329 objset_t *os = NULL;
330 int error;
331 dmu_object_info_t *doi;
332 uint64_t readonly;
333 boolean_t owned = B_FALSE;
334
335 error = dsl_prop_get_integer(name,
336 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
337 if (error != 0)
338 return (SET_ERROR(error));
339 if (readonly)
340 return (SET_ERROR(EROFS));
341
342 mutex_enter(&zvol_state_lock);
343 zv = zvol_find_by_name(name);
344
345 if (zv == NULL || zv->zv_objset == NULL) {
346 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
347 FTAG, &os)) != 0) {
348 mutex_exit(&zvol_state_lock);
349 return (SET_ERROR(error));
350 }
351 owned = B_TRUE;
352 if (zv != NULL)
353 zv->zv_objset = os;
354 } else {
355 os = zv->zv_objset;
356 }
357
358 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
359
360 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
361 (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
362 goto out;
363
364 error = zvol_update_volsize(volsize, os);
365 kmem_free(doi, sizeof (dmu_object_info_t));
366
367 if (error == 0 && zv != NULL)
368 error = zvol_update_live_volsize(zv, volsize);
369 out:
370 if (owned) {
371 dmu_objset_disown(os, FTAG);
372 if (zv != NULL)
373 zv->zv_objset = NULL;
374 }
375 mutex_exit(&zvol_state_lock);
376 return (error);
377 }
378
379 /*
380 * Sanity check volume block size.
381 */
382 int
383 zvol_check_volblocksize(const char *name, uint64_t volblocksize)
384 {
385 /* Record sizes above 128k need the feature to be enabled */
386 if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
387 spa_t *spa;
388 int error;
389
390 if ((error = spa_open(name, &spa, FTAG)) != 0)
391 return (error);
392
393 if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
394 spa_close(spa, FTAG);
395 return (SET_ERROR(ENOTSUP));
396 }
397
398 /*
399 * We don't allow setting the property above 1MB,
400 * unless the tunable has been changed.
401 */
		if (volblocksize > zfs_max_recordsize) {
			spa_close(spa, FTAG);
			return (SET_ERROR(EDOM));
		}
404
405 spa_close(spa, FTAG);
406 }
407
408 if (volblocksize < SPA_MINBLOCKSIZE ||
409 volblocksize > SPA_MAXBLOCKSIZE ||
410 !ISP2(volblocksize))
411 return (SET_ERROR(EDOM));
412
413 return (0);
414 }
415
416 /*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
418 */
419 int
420 zvol_set_volblocksize(const char *name, uint64_t volblocksize)
421 {
422 zvol_state_t *zv;
423 dmu_tx_t *tx;
424 int error;
425
426 mutex_enter(&zvol_state_lock);
427
428 zv = zvol_find_by_name(name);
429 if (zv == NULL) {
430 error = SET_ERROR(ENXIO);
431 goto out;
432 }
433
434 if (zv->zv_flags & ZVOL_RDONLY) {
435 error = SET_ERROR(EROFS);
436 goto out;
437 }
438
439 tx = dmu_tx_create(zv->zv_objset);
440 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
441 error = dmu_tx_assign(tx, TXG_WAIT);
442 if (error) {
443 dmu_tx_abort(tx);
444 } else {
445 error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
446 volblocksize, 0, tx);
447 if (error == ENOTSUP)
448 error = SET_ERROR(EBUSY);
449 dmu_tx_commit(tx);
450 if (error == 0)
451 zv->zv_volblocksize = volblocksize;
452 }
453 out:
454 mutex_exit(&zvol_state_lock);
455
456 return (SET_ERROR(error));
457 }
458
459 /*
460 * Replay a TX_WRITE ZIL transaction that didn't get committed
461 * after a system failure
462 */
463 static int
464 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
465 {
466 objset_t *os = zv->zv_objset;
467 char *data = (char *)(lr + 1); /* data follows lr_write_t */
468 uint64_t off = lr->lr_offset;
469 uint64_t len = lr->lr_length;
470 dmu_tx_t *tx;
471 int error;
472
473 if (byteswap)
474 byteswap_uint64_array(lr, sizeof (*lr));
475
476 tx = dmu_tx_create(os);
477 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
478 error = dmu_tx_assign(tx, TXG_WAIT);
479 if (error) {
480 dmu_tx_abort(tx);
481 } else {
482 dmu_write(os, ZVOL_OBJ, off, len, data, tx);
483 dmu_tx_commit(tx);
484 }
485
486 return (SET_ERROR(error));
487 }
488
489 static int
490 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
491 {
492 return (SET_ERROR(ENOTSUP));
493 }
494
495 /*
496 * Callback vectors for replaying records.
497 * Only TX_WRITE is needed for zvol.
498 */
499 zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
500 (zil_replay_func_t)zvol_replay_err, /* no such transaction type */
501 (zil_replay_func_t)zvol_replay_err, /* TX_CREATE */
502 (zil_replay_func_t)zvol_replay_err, /* TX_MKDIR */
503 (zil_replay_func_t)zvol_replay_err, /* TX_MKXATTR */
504 (zil_replay_func_t)zvol_replay_err, /* TX_SYMLINK */
505 (zil_replay_func_t)zvol_replay_err, /* TX_REMOVE */
506 (zil_replay_func_t)zvol_replay_err, /* TX_RMDIR */
507 (zil_replay_func_t)zvol_replay_err, /* TX_LINK */
508 (zil_replay_func_t)zvol_replay_err, /* TX_RENAME */
509 (zil_replay_func_t)zvol_replay_write, /* TX_WRITE */
510 (zil_replay_func_t)zvol_replay_err, /* TX_TRUNCATE */
511 (zil_replay_func_t)zvol_replay_err, /* TX_SETATTR */
512 (zil_replay_func_t)zvol_replay_err, /* TX_ACL */
513 };
514
515 /*
516 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
517 *
518 * We store data in the log buffers if it's small enough.
519 * Otherwise we will later flush the data out via dmu_sync().
520 */
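/*
 * Each log record is written using one of three strategies (see
 * itx_wr_state_t in zil_impl.h):
 *
 *   WR_INDIRECT  - the data is synced via dmu_sync() and the record
 *                  stores only a pointer to the data block,
 *   WR_COPIED    - the data is copied into the record immediately,
 *   WR_NEED_COPY - the data is copied when the itx is committed.
 */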
521 ssize_t zvol_immediate_write_sz = 32768;
522
523 static void
524 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
525 uint64_t size, int sync)
526 {
527 uint32_t blocksize = zv->zv_volblocksize;
528 zilog_t *zilog = zv->zv_zilog;
529 boolean_t slogging;
530 ssize_t immediate_write_sz;
531
532 if (zil_replaying(zilog, tx))
533 return;
534
535 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
536 ? 0 : zvol_immediate_write_sz;
537 slogging = spa_has_slogs(zilog->zl_spa) &&
538 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
539
540 while (size) {
541 itx_t *itx;
542 lr_write_t *lr;
543 ssize_t len;
544 itx_wr_state_t write_state;
545
546 /*
547 * Unlike zfs_log_write() we can be called with
548 * up to DMU_MAX_ACCESS/2 (5MB) writes.
549 */
550 if (blocksize > immediate_write_sz && !slogging &&
551 size >= blocksize && offset % blocksize == 0) {
552 write_state = WR_INDIRECT; /* uses dmu_sync */
553 len = blocksize;
554 } else if (sync) {
555 write_state = WR_COPIED;
556 len = MIN(ZIL_MAX_LOG_DATA, size);
557 } else {
558 write_state = WR_NEED_COPY;
559 len = MIN(ZIL_MAX_LOG_DATA, size);
560 }
561
562 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
563 (write_state == WR_COPIED ? len : 0));
564 lr = (lr_write_t *)&itx->itx_lr;
565 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
566 ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
567 zil_itx_destroy(itx);
568 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
569 lr = (lr_write_t *)&itx->itx_lr;
570 write_state = WR_NEED_COPY;
571 }
572
573 itx->itx_wr_state = write_state;
574 if (write_state == WR_NEED_COPY)
575 itx->itx_sod += len;
576 lr->lr_foid = ZVOL_OBJ;
577 lr->lr_offset = offset;
578 lr->lr_length = len;
579 lr->lr_blkoff = 0;
580 BP_ZERO(&lr->lr_blkptr);
581
582 itx->itx_private = zv;
583 itx->itx_sync = sync;
584
585 (void) zil_itx_assign(zilog, itx, tx);
586
587 offset += len;
588 size -= len;
589 }
590 }
591
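/*
 * Handle a write bio: honor any requested flush by committing the ZIL,
 * take the range lock as a writer, copy the data into the DMU in a
 * single transaction, and log the write so it can be replayed.
 */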
592 static int
593 zvol_write(struct bio *bio)
594 {
595 zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
596 uint64_t offset = BIO_BI_SECTOR(bio) << 9;
597 uint64_t size = BIO_BI_SIZE(bio);
598 int error = 0;
599 dmu_tx_t *tx;
600 rl_t *rl;
601
602 if (bio->bi_rw & VDEV_REQ_FLUSH)
603 zil_commit(zv->zv_zilog, ZVOL_OBJ);
604
605 /*
606 * Some requests are just for flush and nothing else.
607 */
608 if (size == 0)
609 goto out;
610
611 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
612
613 tx = dmu_tx_create(zv->zv_objset);
614 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
615
616 /* This will only fail for ENOSPC */
617 error = dmu_tx_assign(tx, TXG_WAIT);
618 if (error) {
619 dmu_tx_abort(tx);
620 zfs_range_unlock(rl);
621 goto out;
622 }
623
624 error = dmu_write_bio(zv->zv_objset, ZVOL_OBJ, bio, tx);
625 if (error == 0)
626 zvol_log_write(zv, tx, offset, size,
627 !!(bio->bi_rw & VDEV_REQ_FUA));
628
629 dmu_tx_commit(tx);
630 zfs_range_unlock(rl);
631
632 if ((bio->bi_rw & VDEV_REQ_FUA) ||
633 zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
634 zil_commit(zv->zv_zilog, ZVOL_OBJ);
635
636 out:
637 return (error);
638 }
639
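/*
 * Handle a discard bio by freeing the corresponding range of the
 * volume object.
 */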
640 static int
641 zvol_discard(struct bio *bio)
642 {
643 zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
644 uint64_t start = BIO_BI_SECTOR(bio) << 9;
645 uint64_t size = BIO_BI_SIZE(bio);
646 uint64_t end = start + size;
647 int error;
648 rl_t *rl;
649
650 if (end > zv->zv_volsize)
651 return (SET_ERROR(EIO));
652
653 /*
654 * Align the request to volume block boundaries when REQ_SECURE is
655 * available, but not requested. If we don't, then this will force
656 * dnode_free_range() to zero out the unaligned parts, which is slow
657 * (read-modify-write) and useless since we are not freeing any space
658 * by doing so. Kernels that do not support REQ_SECURE (2.6.32 through
659 * 2.6.35) will not receive this optimization.
660 */
661 #ifdef REQ_SECURE
662 if (!(bio->bi_rw & REQ_SECURE)) {
663 start = P2ROUNDUP(start, zv->zv_volblocksize);
664 end = P2ALIGN(end, zv->zv_volblocksize);
665 }
666 #endif
667
668 if (start >= end)
669 return (0);
670
671 rl = zfs_range_lock(&zv->zv_znode, start, size, RL_WRITER);
672
673 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, size);
674
675 /*
676 * TODO: maybe we should add the operation to the log.
677 */
678
679 zfs_range_unlock(rl);
680
681 return (error);
682 }
683
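/*
 * Handle a read bio under the range lock, copying data out of the DMU.
 */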
684 static int
685 zvol_read(struct bio *bio)
686 {
687 zvol_state_t *zv = bio->bi_bdev->bd_disk->private_data;
688 uint64_t offset = BIO_BI_SECTOR(bio) << 9;
689 uint64_t len = BIO_BI_SIZE(bio);
690 int error;
691 rl_t *rl;
692
693 if (len == 0)
694 return (0);
695
697 rl = zfs_range_lock(&zv->zv_znode, offset, len, RL_READER);
698
699 error = dmu_read_bio(zv->zv_objset, ZVOL_OBJ, bio);
700
701 zfs_range_unlock(rl);
702
703 /* convert checksum errors into IO errors */
704 if (error == ECKSUM)
705 error = SET_ERROR(EIO);
706
707 return (error);
708 }
709
710 static MAKE_REQUEST_FN_RET
711 zvol_request(struct request_queue *q, struct bio *bio)
712 {
713 zvol_state_t *zv = q->queuedata;
714 fstrans_cookie_t cookie = spl_fstrans_mark();
715 uint64_t offset = BIO_BI_SECTOR(bio);
716 unsigned int sectors = bio_sectors(bio);
717 int rw = bio_data_dir(bio);
718 #ifdef HAVE_GENERIC_IO_ACCT
719 unsigned long start = jiffies;
720 #endif
721 int error = 0;
722
723 if (bio_has_data(bio) && offset + sectors >
724 get_capacity(zv->zv_disk)) {
725 printk(KERN_INFO
726 "%s: bad access: block=%llu, count=%lu\n",
727 zv->zv_disk->disk_name,
728 (long long unsigned)offset,
729 (long unsigned)sectors);
730 error = SET_ERROR(EIO);
731 goto out1;
732 }
733
734 generic_start_io_acct(rw, sectors, &zv->zv_disk->part0);
735
736 if (rw == WRITE) {
737 if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
738 error = SET_ERROR(EROFS);
739 goto out2;
740 }
741
742 if (bio->bi_rw & VDEV_REQ_DISCARD) {
743 error = zvol_discard(bio);
744 goto out2;
745 }
746
747 error = zvol_write(bio);
748 } else
749 error = zvol_read(bio);
750
751 out2:
752 generic_end_io_acct(rw, &zv->zv_disk->part0, start);
753 out1:
754 bio_endio(bio, -error);
755 spl_fstrans_unmark(cookie);
756 #ifdef HAVE_MAKE_REQUEST_FN_RET_INT
757 return (0);
758 #endif
759 }
760
761 static void
762 zvol_get_done(zgd_t *zgd, int error)
763 {
764 if (zgd->zgd_db)
765 dmu_buf_rele(zgd->zgd_db, zgd);
766
767 zfs_range_unlock(zgd->zgd_rl);
768
769 if (error == 0 && zgd->zgd_bp)
770 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
771
772 kmem_free(zgd, sizeof (zgd_t));
773 }
774
775 /*
776 * Get data to generate a TX_WRITE intent log record.
777 */
778 static int
779 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
780 {
781 zvol_state_t *zv = arg;
782 objset_t *os = zv->zv_objset;
783 uint64_t object = ZVOL_OBJ;
784 uint64_t offset = lr->lr_offset;
785 uint64_t size = lr->lr_length;
786 blkptr_t *bp = &lr->lr_blkptr;
787 dmu_buf_t *db;
788 zgd_t *zgd;
789 int error;
790
791 ASSERT(zio != NULL);
792 ASSERT(size != 0);
793
794 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
795 zgd->zgd_zilog = zv->zv_zilog;
796 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
797
798 /*
799 * Write records come in two flavors: immediate and indirect.
800 * For small writes it's cheaper to store the data with the
801 * log record (immediate); for large writes it's cheaper to
802 * sync the data and get a pointer to it (indirect) so that
803 * we don't have to write the data twice.
804 */
805 if (buf != NULL) { /* immediate write */
806 error = dmu_read(os, object, offset, size, buf,
807 DMU_READ_NO_PREFETCH);
808 } else {
809 size = zv->zv_volblocksize;
810 offset = P2ALIGN_TYPED(offset, size, uint64_t);
811 error = dmu_buf_hold(os, object, offset, zgd, &db,
812 DMU_READ_NO_PREFETCH);
813 if (error == 0) {
814 blkptr_t *obp = dmu_buf_get_blkptr(db);
815 if (obp) {
816 ASSERT(BP_IS_HOLE(bp));
817 *bp = *obp;
818 }
819
820 zgd->zgd_db = db;
821 zgd->zgd_bp = &lr->lr_blkptr;
822
823 ASSERT(db != NULL);
824 ASSERT(db->db_offset == offset);
825 ASSERT(db->db_size == size);
826
827 error = dmu_sync(zio, lr->lr_common.lrc_txg,
828 zvol_get_done, zgd);
829
830 if (error == 0)
831 return (0);
832 }
833 }
834
835 zvol_get_done(zgd, error);
836
837 return (SET_ERROR(error));
838 }
839
840 /*
841 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
842 */
843 static void
844 zvol_insert(zvol_state_t *zv_insert)
845 {
846 zvol_state_t *zv = NULL;
847
848 ASSERT(MUTEX_HELD(&zvol_state_lock));
849 ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
850 for (zv = list_head(&zvol_state_list); zv != NULL;
851 zv = list_next(&zvol_state_list, zv)) {
852 if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
853 break;
854 }
855
856 list_insert_before(&zvol_state_list, zv, zv_insert);
857 }
858
859 /*
 * Simply remove the zvol from the list of zvols.
861 */
862 static void
863 zvol_remove(zvol_state_t *zv_remove)
864 {
865 ASSERT(MUTEX_HELD(&zvol_state_lock));
866 list_remove(&zvol_state_list, zv_remove);
867 }
868
869 static int
870 zvol_first_open(zvol_state_t *zv)
871 {
872 objset_t *os;
873 uint64_t volsize;
874 int locked = 0;
875 int error;
876 uint64_t ro;
877
878 /*
879 * In all other cases the spa_namespace_lock is taken before the
880 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
881 * function calls fops->open() with the bdev->bd_mutex lock held.
882 *
883 * To avoid a potential lock inversion deadlock we preemptively
	 * try to take spa_namespace_lock.  Normally it will not
885 * be contended and this is safe because spa_open_common() handles
886 * the case where the caller already holds the spa_namespace_lock.
887 *
888 * When it is contended we risk a lock inversion if we were to
889 * block waiting for the lock. Luckily, the __blkdev_get()
890 * function allows us to return -ERESTARTSYS which will result in
891 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
892 * called again. This process can be repeated safely until both
893 * locks are acquired.
894 */
895 if (!mutex_owned(&spa_namespace_lock)) {
896 locked = mutex_tryenter(&spa_namespace_lock);
897 if (!locked)
898 return (-SET_ERROR(ERESTARTSYS));
899 }
900
901 error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
902 if (error)
903 goto out_mutex;
904
905 /* lie and say we're read-only */
906 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
907 if (error)
908 goto out_mutex;
909
910 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
911 if (error) {
912 dmu_objset_disown(os, zvol_tag);
913 goto out_mutex;
914 }
915
916 zv->zv_objset = os;
917 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
918 if (error) {
919 dmu_objset_disown(os, zvol_tag);
920 goto out_mutex;
921 }
922
923 set_capacity(zv->zv_disk, volsize >> 9);
924 zv->zv_volsize = volsize;
925 zv->zv_zilog = zil_open(os, zvol_get_data);
926
927 if (ro || dmu_objset_is_snapshot(os) ||
928 !spa_writeable(dmu_objset_spa(os))) {
929 set_disk_ro(zv->zv_disk, 1);
930 zv->zv_flags |= ZVOL_RDONLY;
931 } else {
932 set_disk_ro(zv->zv_disk, 0);
933 zv->zv_flags &= ~ZVOL_RDONLY;
934 }
935
936 out_mutex:
937 if (locked)
938 mutex_exit(&spa_namespace_lock);
939
940 return (SET_ERROR(-error));
941 }
942
943 static void
944 zvol_last_close(zvol_state_t *zv)
945 {
946 zil_close(zv->zv_zilog);
947 zv->zv_zilog = NULL;
948
949 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
950 zv->zv_dbuf = NULL;
951
952 /*
953 * Evict cached data
954 */
955 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
956 !(zv->zv_flags & ZVOL_RDONLY))
957 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
958 (void) dmu_objset_evict_dbufs(zv->zv_objset);
959
960 dmu_objset_disown(zv->zv_objset, zvol_tag);
961 zv->zv_objset = NULL;
962 }
963
964 static int
965 zvol_open(struct block_device *bdev, fmode_t flag)
966 {
967 zvol_state_t *zv = bdev->bd_disk->private_data;
968 int error = 0, drop_mutex = 0;
969
970 /*
971 * If the caller is already holding the mutex do not take it
972 * again, this will happen as part of zvol_create_minor().
973 * Once add_disk() is called the device is live and the kernel
974 * will attempt to open it to read the partition information.
975 */
976 if (!mutex_owned(&zvol_state_lock)) {
977 mutex_enter(&zvol_state_lock);
978 drop_mutex = 1;
979 }
980
981 ASSERT3P(zv, !=, NULL);
982
983 if (zv->zv_open_count == 0) {
984 error = zvol_first_open(zv);
985 if (error)
986 goto out_mutex;
987 }
988
989 if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
990 error = -EROFS;
991 goto out_open_count;
992 }
993
994 zv->zv_open_count++;
995
996 out_open_count:
997 if (zv->zv_open_count == 0)
998 zvol_last_close(zv);
999
1000 out_mutex:
1001 if (drop_mutex)
1002 mutex_exit(&zvol_state_lock);
1003
1004 check_disk_change(bdev);
1005
1006 return (SET_ERROR(error));
1007 }
1008
1009 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1010 static void
1011 #else
1012 static int
1013 #endif
1014 zvol_release(struct gendisk *disk, fmode_t mode)
1015 {
1016 zvol_state_t *zv = disk->private_data;
1017 int drop_mutex = 0;
1018
1019 if (!mutex_owned(&zvol_state_lock)) {
1020 mutex_enter(&zvol_state_lock);
1021 drop_mutex = 1;
1022 }
1023
1024 if (zv->zv_open_count > 0) {
1025 zv->zv_open_count--;
1026 if (zv->zv_open_count == 0)
1027 zvol_last_close(zv);
1028 }
1029
1030 if (drop_mutex)
1031 mutex_exit(&zvol_state_lock);
1032
1033 #ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1034 return (0);
1035 #endif
1036 }
1037
1038 static int
1039 zvol_ioctl(struct block_device *bdev, fmode_t mode,
1040 unsigned int cmd, unsigned long arg)
1041 {
1042 zvol_state_t *zv = bdev->bd_disk->private_data;
1043 int error = 0;
1044
1045 if (zv == NULL)
1046 return (SET_ERROR(-ENXIO));
1047
1048 switch (cmd) {
1049 case BLKFLSBUF:
1050 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1051 break;
	case BLKZNAME:
		if (copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN))
			error = -EFAULT;
		break;
	default:
		error = -ENOTTY;
		break;
	}
1061
1062 return (SET_ERROR(error));
1063 }
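
/*
 * BLKZNAME is the ioctl used by the zvol_id udev helper to map a /dev/zdN
 * node back to its dataset name.  A minimal userspace sketch, assuming
 * only standard headers plus the BLKZNAME definition:
 *
 *	char name[MAXNAMELEN];
 *	int fd = open("/dev/zd0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, BLKZNAME, name) == 0)
 *		printf("%s\n", name);
 */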
1064
1065 #ifdef CONFIG_COMPAT
1066 static int
1067 zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
1068 unsigned cmd, unsigned long arg)
1069 {
1070 return (zvol_ioctl(bdev, mode, cmd, arg));
1071 }
1072 #else
1073 #define zvol_compat_ioctl NULL
1074 #endif
1075
1076 static int zvol_media_changed(struct gendisk *disk)
1077 {
1078 zvol_state_t *zv = disk->private_data;
1079
1080 return (zv->zv_changed);
1081 }
1082
1083 static int zvol_revalidate_disk(struct gendisk *disk)
1084 {
1085 zvol_state_t *zv = disk->private_data;
1086
1087 zv->zv_changed = 0;
1088 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1089
1090 return (0);
1091 }
1092
1093 /*
1094 * Provide a simple virtual geometry for legacy compatibility. For devices
1095 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
1097 * is used to keep the cylinders count reasonable.
1098 */
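/*
 * For example, a 1 GiB volume (2097152 sectors) reports 16 heads and
 * 63 sectors per track, giving 2097152 / (16 * 63) = 2080 cylinders.
 */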
1099 static int
1100 zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1101 {
1102 zvol_state_t *zv = bdev->bd_disk->private_data;
1103 sector_t sectors = get_capacity(zv->zv_disk);
1104
1105 if (sectors > 2048) {
1106 geo->heads = 16;
1107 geo->sectors = 63;
1108 } else {
1109 geo->heads = 2;
1110 geo->sectors = 4;
1111 }
1112
1113 geo->start = 0;
1114 geo->cylinders = sectors / (geo->heads * geo->sectors);
1115
1116 return (0);
1117 }
1118
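/*
 * Block layer probe callback registered via blk_register_region() in
 * zvol_init().  Maps a dev_t to the kobject of an existing zvol's
 * gendisk, taking a reference via get_disk().
 */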
1119 static struct kobject *
1120 zvol_probe(dev_t dev, int *part, void *arg)
1121 {
1122 zvol_state_t *zv;
1123 struct kobject *kobj;
1124
1125 mutex_enter(&zvol_state_lock);
1126 zv = zvol_find_by_dev(dev);
1127 kobj = zv ? get_disk(zv->zv_disk) : NULL;
1128 mutex_exit(&zvol_state_lock);
1129
1130 return (kobj);
1131 }
1132
1133 #ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
1134 static struct block_device_operations zvol_ops = {
1135 .open = zvol_open,
1136 .release = zvol_release,
1137 .ioctl = zvol_ioctl,
1138 .compat_ioctl = zvol_compat_ioctl,
1139 .media_changed = zvol_media_changed,
1140 .revalidate_disk = zvol_revalidate_disk,
1141 .getgeo = zvol_getgeo,
1142 .owner = THIS_MODULE,
1143 };
1144
1145 #else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1146
1147 static int
1148 zvol_open_by_inode(struct inode *inode, struct file *file)
1149 {
1150 return (zvol_open(inode->i_bdev, file->f_mode));
1151 }
1152
1153 static int
1154 zvol_release_by_inode(struct inode *inode, struct file *file)
1155 {
1156 return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
1157 }
1158
1159 static int
1160 zvol_ioctl_by_inode(struct inode *inode, struct file *file,
1161 unsigned int cmd, unsigned long arg)
1162 {
1163 if (file == NULL || inode == NULL)
1164 return (SET_ERROR(-EINVAL));
1165
1166 return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
1167 }
1168
1169 #ifdef CONFIG_COMPAT
1170 static long
1171 zvol_compat_ioctl_by_inode(struct file *file,
1172 unsigned int cmd, unsigned long arg)
1173 {
1174 if (file == NULL)
1175 return (SET_ERROR(-EINVAL));
1176
1177 return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
1178 file->f_mode, cmd, arg));
1179 }
1180 #else
1181 #define zvol_compat_ioctl_by_inode NULL
1182 #endif
1183
1184 static struct block_device_operations zvol_ops = {
1185 .open = zvol_open_by_inode,
1186 .release = zvol_release_by_inode,
1187 .ioctl = zvol_ioctl_by_inode,
1188 .compat_ioctl = zvol_compat_ioctl_by_inode,
1189 .media_changed = zvol_media_changed,
1190 .revalidate_disk = zvol_revalidate_disk,
1191 .getgeo = zvol_getgeo,
1192 .owner = THIS_MODULE,
1193 };
1194 #endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1195
1196 /*
 * Allocate memory for a new zvol_state_t and set up the required
1198 * request queue and generic disk structures for the block device.
1199 */
1200 static zvol_state_t *
1201 zvol_alloc(dev_t dev, const char *name)
1202 {
1203 zvol_state_t *zv;
1204
1205 zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
1206
1207 spin_lock_init(&zv->zv_lock);
1208 list_link_init(&zv->zv_next);
1209
1210 zv->zv_queue = blk_alloc_queue(GFP_ATOMIC);
1211 if (zv->zv_queue == NULL)
1212 goto out_kmem;
1213
1214 blk_queue_make_request(zv->zv_queue, zvol_request);
1215
1216 #ifdef HAVE_BLK_QUEUE_FLUSH
1217 blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
1218 #else
1219 blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
1220 #endif /* HAVE_BLK_QUEUE_FLUSH */
1221
1222 zv->zv_disk = alloc_disk(ZVOL_MINORS);
1223 if (zv->zv_disk == NULL)
1224 goto out_queue;
1225
1226 zv->zv_queue->queuedata = zv;
1227 zv->zv_dev = dev;
1228 zv->zv_open_count = 0;
1229 strlcpy(zv->zv_name, name, MAXNAMELEN);
1230
1231 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
1232 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
1233 sizeof (rl_t), offsetof(rl_t, r_node));
1234 zv->zv_znode.z_is_zvol = TRUE;
1235
1236 zv->zv_disk->major = zvol_major;
1237 zv->zv_disk->first_minor = (dev & MINORMASK);
1238 zv->zv_disk->fops = &zvol_ops;
1239 zv->zv_disk->private_data = zv;
1240 zv->zv_disk->queue = zv->zv_queue;
1241 snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
1242 ZVOL_DEV_NAME, (dev & MINORMASK));
1243
1244 return (zv);
1245
1246 out_queue:
1247 blk_cleanup_queue(zv->zv_queue);
1248 out_kmem:
1249 kmem_free(zv, sizeof (zvol_state_t));
1250
1251 return (NULL);
1252 }
1253
1254 /*
1255 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
1256 */
1257 static void
1258 zvol_free(zvol_state_t *zv)
1259 {
1260 avl_destroy(&zv->zv_znode.z_range_avl);
1261 mutex_destroy(&zv->zv_znode.z_range_lock);
1262
1263 del_gendisk(zv->zv_disk);
1264 blk_cleanup_queue(zv->zv_queue);
1265 put_disk(zv->zv_disk);
1266
1267 kmem_free(zv, sizeof (zvol_state_t));
1268 }
1269
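/*
 * Given a snapshot name such as "tank/vol@snap", check the "snapdev"
 * property of the parent volume and return ENODEV if its device nodes
 * should remain hidden.
 */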
1270 static int
1271 __zvol_snapdev_hidden(const char *name)
1272 {
1273 uint64_t snapdev;
1274 char *parent;
1275 char *atp;
1276 int error = 0;
1277
1278 parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1279 (void) strlcpy(parent, name, MAXPATHLEN);
1280
1281 if ((atp = strrchr(parent, '@')) != NULL) {
1282 *atp = '\0';
1283 error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
1284 if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
1285 error = SET_ERROR(ENODEV);
1286 }
1287
1288 kmem_free(parent, MAXPATHLEN);
1289
1290 return (SET_ERROR(error));
1291 }
1292
1293 static int
1294 __zvol_create_minor(const char *name, boolean_t ignore_snapdev)
1295 {
1296 zvol_state_t *zv;
1297 objset_t *os;
1298 dmu_object_info_t *doi;
1299 uint64_t volsize;
1300 uint64_t len;
1301 unsigned minor = 0;
1302 int error = 0;
1303
1304 ASSERT(MUTEX_HELD(&zvol_state_lock));
1305
1306 zv = zvol_find_by_name(name);
1307 if (zv) {
1308 error = SET_ERROR(EEXIST);
1309 goto out;
1310 }
1311
1312 if (ignore_snapdev == B_FALSE) {
1313 error = __zvol_snapdev_hidden(name);
1314 if (error)
1315 goto out;
1316 }
1317
1318 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
1319
1320 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
1321 if (error)
1322 goto out_doi;
1323
1324 error = dmu_object_info(os, ZVOL_OBJ, doi);
1325 if (error)
1326 goto out_dmu_objset_disown;
1327
1328 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1329 if (error)
1330 goto out_dmu_objset_disown;
1331
1332 error = zvol_find_minor(&minor);
1333 if (error)
1334 goto out_dmu_objset_disown;
1335
1336 zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1337 if (zv == NULL) {
1338 error = SET_ERROR(EAGAIN);
1339 goto out_dmu_objset_disown;
1340 }
1341
1342 if (dmu_objset_is_snapshot(os))
1343 zv->zv_flags |= ZVOL_RDONLY;
1344
1345 zv->zv_volblocksize = doi->doi_data_block_size;
1346 zv->zv_volsize = volsize;
1347 zv->zv_objset = os;
1348
1349 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1350
1351 blk_queue_max_hw_sectors(zv->zv_queue, (DMU_MAX_ACCESS / 4) >> 9);
1352 blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
1353 blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
1354 blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
1355 blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
1356 blk_queue_max_discard_sectors(zv->zv_queue,
1357 (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
1358 blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
1359 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
1360 #ifdef QUEUE_FLAG_NONROT
1361 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
1362 #endif
1363 #ifdef QUEUE_FLAG_ADD_RANDOM
1364 queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, zv->zv_queue);
1365 #endif
1366
1367 if (spa_writeable(dmu_objset_spa(os))) {
1368 if (zil_replay_disable)
1369 zil_destroy(dmu_objset_zil(os), B_FALSE);
1370 else
1371 zil_replay(os, zv, zvol_replay_vector);
1372 }
1373
1374 /*
1375 * When udev detects the addition of the device it will immediately
1376 * invoke blkid(8) to determine the type of content on the device.
1377 * Prefetching the blocks commonly scanned by blkid(8) will speed
1378 * up this process.
1379 */
1380 len = MIN(MAX(zvol_prefetch_bytes, 0), SPA_MAXBLOCKSIZE);
1381 if (len > 0) {
1382 dmu_prefetch(os, ZVOL_OBJ, 0, len);
1383 dmu_prefetch(os, ZVOL_OBJ, volsize - len, len);
1384 }
1385
1386 zv->zv_objset = NULL;
1387 out_dmu_objset_disown:
1388 dmu_objset_disown(os, zvol_tag);
1389 out_doi:
1390 kmem_free(doi, sizeof (dmu_object_info_t));
1391 out:
1392
1393 if (error == 0) {
1394 zvol_insert(zv);
1395 add_disk(zv->zv_disk);
1396 }
1397
1398 return (SET_ERROR(error));
1399 }
1400
1401 /*
 * Create a block device minor node and set up the linkage between it
1403 * and the specified volume. Once this function returns the block
1404 * device is live and ready for use.
1405 */
1406 int
1407 zvol_create_minor(const char *name)
1408 {
1409 int error;
1410
1411 mutex_enter(&zvol_state_lock);
1412 error = __zvol_create_minor(name, B_FALSE);
1413 mutex_exit(&zvol_state_lock);
1414
1415 return (SET_ERROR(error));
1416 }
1417
1418 static int
1419 __zvol_remove_minor(const char *name)
1420 {
1421 zvol_state_t *zv;
1422
1423 ASSERT(MUTEX_HELD(&zvol_state_lock));
1424
1425 zv = zvol_find_by_name(name);
1426 if (zv == NULL)
1427 return (SET_ERROR(ENXIO));
1428
1429 if (zv->zv_open_count > 0)
1430 return (SET_ERROR(EBUSY));
1431
1432 zvol_remove(zv);
1433 zvol_free(zv);
1434
1435 return (0);
1436 }
1437
1438 /*
1439 * Remove a block device minor node for the specified volume.
1440 */
1441 int
1442 zvol_remove_minor(const char *name)
1443 {
1444 int error;
1445
1446 mutex_enter(&zvol_state_lock);
1447 error = __zvol_remove_minor(name);
1448 mutex_exit(&zvol_state_lock);
1449
1450 return (SET_ERROR(error));
1451 }
1452
1453 /*
 * Rename a block device minor node for the specified volume.
1455 */
1456 static void
1457 __zvol_rename_minor(zvol_state_t *zv, const char *newname)
1458 {
1459 int readonly = get_disk_ro(zv->zv_disk);
1460
1461 ASSERT(MUTEX_HELD(&zvol_state_lock));
1462
1463 strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
1464
1465 /*
1466 * The block device's read-only state is briefly changed causing
1467 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
1468 * the name change and fixes the symlinks. This does not change
1469 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
1470 * changes. This would normally be done using kobject_uevent() but
1471 * that is a GPL-only symbol which is why we need this workaround.
1472 */
1473 set_disk_ro(zv->zv_disk, !readonly);
1474 set_disk_ro(zv->zv_disk, readonly);
1475 }
1476
1477 static int
1478 zvol_create_minors_cb(const char *dsname, void *arg)
1479 {
1480 (void) zvol_create_minor(dsname);
1481
1482 return (0);
1483 }
1484
1485 /*
 * Create minors for the specified dataset, including children and snapshots.
1487 */
1488 int
1489 zvol_create_minors(const char *name)
1490 {
1491 int error = 0;
1492
1493 if (!zvol_inhibit_dev)
1494 error = dmu_objset_find((char *)name, zvol_create_minors_cb,
1495 NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1496
1497 return (SET_ERROR(error));
1498 }
1499
1500 /*
 * Remove minors for the specified dataset, including children and snapshots.
1502 */
1503 void
1504 zvol_remove_minors(const char *name)
1505 {
1506 zvol_state_t *zv, *zv_next;
1507 int namelen = ((name) ? strlen(name) : 0);
1508
1509 if (zvol_inhibit_dev)
1510 return;
1511
1512 mutex_enter(&zvol_state_lock);
1513
1514 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1515 zv_next = list_next(&zvol_state_list, zv);
1516
1517 if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
1518 (strncmp(zv->zv_name, name, namelen) == 0 &&
1519 zv->zv_name[namelen] == '/')) {
1520 zvol_remove(zv);
1521 zvol_free(zv);
1522 }
1523 }
1524
1525 mutex_exit(&zvol_state_lock);
1526 }
1527
1528 /*
 * Rename minors for the specified dataset, including children and snapshots.
1530 */
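/*
 * For example, renaming "tank/a" to "tank/b" also renames the minors for
 * "tank/a/vol" and "tank/a/vol@snap" to "tank/b/vol" and "tank/b/vol@snap".
 */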
1531 void
1532 zvol_rename_minors(const char *oldname, const char *newname)
1533 {
1534 zvol_state_t *zv, *zv_next;
1535 int oldnamelen, newnamelen;
1536 char *name;
1537
1538 if (zvol_inhibit_dev)
1539 return;
1540
1541 oldnamelen = strlen(oldname);
1542 newnamelen = strlen(newname);
1543 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1544
1545 mutex_enter(&zvol_state_lock);
1546
1547 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1548 zv_next = list_next(&zvol_state_list, zv);
1549
1550 if (strcmp(zv->zv_name, oldname) == 0) {
1551 __zvol_rename_minor(zv, newname);
1552 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
1553 (zv->zv_name[oldnamelen] == '/' ||
1554 zv->zv_name[oldnamelen] == '@')) {
1555 snprintf(name, MAXNAMELEN, "%s%c%s", newname,
1556 zv->zv_name[oldnamelen],
1557 zv->zv_name + oldnamelen + 1);
1558 __zvol_rename_minor(zv, name);
1559 }
1560 }
1561
1562 mutex_exit(&zvol_state_lock);
1563
1564 kmem_free(name, MAXNAMELEN);
1565 }
1566
1567 static int
snapdev_snapshot_changed_cb(const char *dsname, void *arg)
{
	uint64_t snapdev = *(uint64_t *)arg;
1570
1571 if (strchr(dsname, '@') == NULL)
1572 return (0);
1573
1574 switch (snapdev) {
1575 case ZFS_SNAPDEV_VISIBLE:
1576 mutex_enter(&zvol_state_lock);
1577 (void) __zvol_create_minor(dsname, B_TRUE);
1578 mutex_exit(&zvol_state_lock);
1579 break;
1580 case ZFS_SNAPDEV_HIDDEN:
1581 (void) zvol_remove_minor(dsname);
1582 break;
1583 }
1584
1585 return (0);
1586 }
1587
1588 int
zvol_set_snapdev(const char *dsname, uint64_t snapdev)
{
	(void) dmu_objset_find((char *)dsname, snapdev_snapshot_changed_cb,
1591 &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
1592 /* caller should continue to modify snapdev property */
1593 return (-1);
1594 }
1595
1596 int
1597 zvol_init(void)
1598 {
1599 int error;
1600
1601 list_create(&zvol_state_list, sizeof (zvol_state_t),
1602 offsetof(zvol_state_t, zv_next));
1603
1604 mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1605
1606 error = register_blkdev(zvol_major, ZVOL_DRIVER);
1607 if (error) {
1608 printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
1609 goto out;
1610 }
1611
1612 blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
1613 THIS_MODULE, zvol_probe, NULL, NULL);
1614
1615 return (0);
1616
1617 out:
1618 mutex_destroy(&zvol_state_lock);
1619 list_destroy(&zvol_state_list);
1620
1621 return (SET_ERROR(error));
1622 }
1623
1624 void
1625 zvol_fini(void)
1626 {
1627 zvol_remove_minors(NULL);
1628 blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
1629 unregister_blkdev(zvol_major, ZVOL_DRIVER);
1630 mutex_destroy(&zvol_state_lock);
1631 list_destroy(&zvol_state_list);
1632 }
1633
1634 module_param(zvol_inhibit_dev, uint, 0644);
1635 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1636
1637 module_param(zvol_major, uint, 0444);
1638 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1639
1640 module_param(zvol_max_discard_blocks, ulong, 0444);
1641 MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
1642
1643 module_param(zvol_prefetch_bytes, uint, 0644);
1644 MODULE_PARM_DESC(zvol_prefetch_bytes, "Prefetch N bytes at zvol start+end");
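
/*
 * Example usage, as a sketch assuming the standard module parameter
 * interface:
 *
 *	modprobe zfs zvol_major=230 zvol_prefetch_bytes=131072
 *	echo 1 > /sys/module/zfs/parameters/zvol_inhibit_dev
 */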