1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 *
27 * ZFS volume emulation driver.
28 *
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
31 *
32 * /dev/<pool_name>/<dataset_name>
33 *
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
36 */
37
38 #include <sys/dbuf.h>
39 #include <sys/dmu_traverse.h>
40 #include <sys/dsl_dataset.h>
41 #include <sys/dsl_prop.h>
42 #include <sys/zap.h>
43 #include <sys/zil_impl.h>
44 #include <sys/zio.h>
45 #include <sys/zfs_rlock.h>
46 #include <sys/zfs_znode.h>
47 #include <sys/zvol.h>
48 #include <linux/blkdev_compat.h>
49
50 unsigned int zvol_inhibit_dev = 0;
51 unsigned int zvol_major = ZVOL_MAJOR;
52 unsigned int zvol_threads = 32;
53 unsigned long zvol_max_discard_blocks = 16384;
54
55 static taskq_t *zvol_taskq;
56 static kmutex_t zvol_state_lock;
57 static list_t zvol_state_list;
58 static char *zvol_tag = "zvol_tag";
59
60 /*
61 * The in-core state of each volume.
62 */
63 typedef struct zvol_state {
64 char zv_name[MAXNAMELEN]; /* name */
65 uint64_t zv_volsize; /* advertised space */
66 uint64_t zv_volblocksize; /* volume block size */
67 objset_t *zv_objset; /* objset handle */
68 uint32_t zv_flags; /* ZVOL_* flags */
69 uint32_t zv_open_count; /* open counts */
70 uint32_t zv_changed; /* disk changed */
71 zilog_t *zv_zilog; /* ZIL handle */
72 znode_t zv_znode; /* for range locking */
73 dmu_buf_t *zv_dbuf; /* bonus handle */
74 dev_t zv_dev; /* device id */
75 struct gendisk *zv_disk; /* generic disk */
76 struct request_queue *zv_queue; /* request queue */
77 spinlock_t zv_lock; /* request queue lock */
78 list_node_t zv_next; /* next zvol_state_t linkage */
79 } zvol_state_t;
80
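/*
 * zv_flags bit: the volume may not be written. Set below for snapshots,
 * when the readonly property is on, or when the pool itself is not writable.
 */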
81 #define ZVOL_RDONLY 0x1
82
83 /*
84 * Find the next available range of ZVOL_MINORS minor numbers. The
85 * zvol_state_list is kept in ascending minor order so we simply need
86 * to scan the list for the first gap in the sequence. This allows us
87 * to recycle minor numbers as devices are created and removed.
88 */
89 static int
90 zvol_find_minor(unsigned *minor)
91 {
92 zvol_state_t *zv;
93
94 *minor = 0;
95 ASSERT(MUTEX_HELD(&zvol_state_lock));
96 for (zv = list_head(&zvol_state_list); zv != NULL;
97 zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
98 if (MINOR(zv->zv_dev) != MINOR(*minor))
99 break;
100 }
101
102 /* All minors are in use */
103 if (*minor >= (1 << MINORBITS))
104 return (SET_ERROR(ENXIO));
105
106 return (0);
107 }
108
109 /*
110 * Find a zvol_state_t given the full major+minor dev_t.
111 */
112 static zvol_state_t *
113 zvol_find_by_dev(dev_t dev)
114 {
115 zvol_state_t *zv;
116
117 ASSERT(MUTEX_HELD(&zvol_state_lock));
118 for (zv = list_head(&zvol_state_list); zv != NULL;
119 zv = list_next(&zvol_state_list, zv)) {
120 if (zv->zv_dev == dev)
121 return (zv);
122 }
123
124 return (NULL);
125 }
126
127 /*
128 * Find a zvol_state_t given the name provided at zvol_alloc() time.
129 */
130 static zvol_state_t *
131 zvol_find_by_name(const char *name)
132 {
133 zvol_state_t *zv;
134
135 ASSERT(MUTEX_HELD(&zvol_state_lock));
136 for (zv = list_head(&zvol_state_list); zv != NULL;
137 zv = list_next(&zvol_state_list, zv)) {
138 if (strncmp(zv->zv_name, name, MAXNAMELEN) == 0)
139 return (zv);
140 }
141
142 return (NULL);
143 }
144
145
146 /*
147 * Given a path, return TRUE if path is a ZVOL.
148 */
149 boolean_t
150 zvol_is_zvol(const char *device)
151 {
152 struct block_device *bdev;
153 unsigned int major;
154
155 bdev = lookup_bdev(device);
156 if (IS_ERR(bdev))
157 return (B_FALSE);
158
159 major = MAJOR(bdev->bd_dev);
160 bdput(bdev);
161
162 if (major == zvol_major)
163 return (B_TRUE);
164
165 return (B_FALSE);
166 }
167
168 /*
169 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
170 */
171 void
172 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
173 {
174 zfs_creat_t *zct = arg;
175 nvlist_t *nvprops = zct->zct_props;
176 int error;
177 uint64_t volblocksize, volsize;
178
179 VERIFY(nvlist_lookup_uint64(nvprops,
180 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
181 if (nvlist_lookup_uint64(nvprops,
182 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
183 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
184
185 /*
186 * These properties must be removed from the list so the generic
187 * property setting step won't apply to them.
188 */
189 VERIFY(nvlist_remove_all(nvprops,
190 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
191 (void) nvlist_remove_all(nvprops,
192 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
193
194 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
195 DMU_OT_NONE, 0, tx);
196 ASSERT(error == 0);
197
198 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
199 DMU_OT_NONE, 0, tx);
200 ASSERT(error == 0);
201
202 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
203 ASSERT(error == 0);
204 }
205
206 /*
207 * ZFS_IOC_OBJSET_STATS entry point.
208 */
209 int
210 zvol_get_stats(objset_t *os, nvlist_t *nv)
211 {
212 int error;
213 dmu_object_info_t *doi;
214 uint64_t val;
215
216 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
217 if (error)
218 return (SET_ERROR(error));
219
220 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
221 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
222 error = dmu_object_info(os, ZVOL_OBJ, doi);
223
224 if (error == 0) {
225 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
226 doi->doi_data_block_size);
227 }
228
229 kmem_free(doi, sizeof (dmu_object_info_t));
230
231 return (SET_ERROR(error));
232 }
233
234 static void
235 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
236 {
237 struct block_device *bdev;
238
239 bdev = bdget_disk(zv->zv_disk, 0);
240 if (bdev == NULL)
241 return;
242 /*
243 * 2.6.28 API change
244 * Added check_disk_size_change() helper function.
245 */
246 #ifdef HAVE_CHECK_DISK_SIZE_CHANGE
247 set_capacity(zv->zv_disk, volsize >> 9);
248 zv->zv_volsize = volsize;
249 check_disk_size_change(zv->zv_disk, bdev);
250 #else
251 zv->zv_volsize = volsize;
252 zv->zv_changed = 1;
253 (void) check_disk_change(bdev);
254 #endif /* HAVE_CHECK_DISK_SIZE_CHANGE */
255
256 bdput(bdev);
257 }
258
259 /*
260 * Sanity check volume size.
261 */
262 int
263 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
264 {
265 if (volsize == 0)
266 return (SET_ERROR(EINVAL));
267
268 if (volsize % blocksize != 0)
269 return (SET_ERROR(EINVAL));
270
271 #ifdef _ILP32
272 if (volsize - 1 > MAXOFFSET_T)
273 return (SET_ERROR(EOVERFLOW));
274 #endif
275 return (0);
276 }
277
278 /*
279 * Ensure the zap is flushed then inform the VFS of the capacity change.
280 */
281 static int
282 zvol_update_volsize(uint64_t volsize, objset_t *os)
283 {
284 dmu_tx_t *tx;
285 int error;
286
287 ASSERT(MUTEX_HELD(&zvol_state_lock));
288
289 tx = dmu_tx_create(os);
290 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
291 error = dmu_tx_assign(tx, TXG_WAIT);
292 if (error) {
293 dmu_tx_abort(tx);
294 return (SET_ERROR(error));
295 }
296
297 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
298 &volsize, tx);
299 dmu_tx_commit(tx);
300
301 if (error == 0)
302 error = dmu_free_long_range(os,
303 ZVOL_OBJ, volsize, DMU_OBJECT_END);
304
305 return (error);
306 }
307
308 static int
309 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
310 {
311 zvol_size_changed(zv, volsize);
312
313 /*
314 * We should post an event here describing the expansion. However,
315 * the zfs_ereport_post() interface doesn't nicely support posting
316 * events for zvols; it assumes events relate to vdevs or zios.
317 */
318
319 return (0);
320 }
321
322 /*
323 * ZFS_PROP_VOLSIZE set entry point.
324 */
325 int
326 zvol_set_volsize(const char *name, uint64_t volsize)
327 {
328 zvol_state_t *zv = NULL;
329 objset_t *os = NULL;
330 int error;
331 dmu_object_info_t *doi;
332 uint64_t readonly;
333 boolean_t owned = B_FALSE;
334
335 error = dsl_prop_get_integer(name,
336 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
337 if (error != 0)
338 return (SET_ERROR(error));
339 if (readonly)
340 return (SET_ERROR(EROFS));
341
342 mutex_enter(&zvol_state_lock);
343 zv = zvol_find_by_name(name);
344
345 if (zv == NULL || zv->zv_objset == NULL) {
346 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
347 FTAG, &os)) != 0) {
348 mutex_exit(&zvol_state_lock);
349 return (SET_ERROR(error));
350 }
351 owned = B_TRUE;
352 if (zv != NULL)
353 zv->zv_objset = os;
354 } else {
355 os = zv->zv_objset;
356 }
357
358 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
359
360 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
361 (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
362 goto out;
363
364 error = zvol_update_volsize(volsize, os);
365 kmem_free(doi, sizeof (dmu_object_info_t));
366
367 if (error == 0 && zv != NULL)
368 error = zvol_update_live_volsize(zv, volsize);
369 out:
370 if (owned) {
371 dmu_objset_disown(os, FTAG);
372 if (zv != NULL)
373 zv->zv_objset = NULL;
374 }
375 mutex_exit(&zvol_state_lock);
376 return (error);
377 }
378
379 /*
380 * Sanity check volume block size.
381 */
382 int
383 zvol_check_volblocksize(uint64_t volblocksize)
384 {
385 if (volblocksize < SPA_MINBLOCKSIZE ||
386 volblocksize > SPA_MAXBLOCKSIZE ||
387 !ISP2(volblocksize))
388 return (SET_ERROR(EDOM));
389
390 return (0);
391 }
392
393 /*
394 * ZFS_PROP_VOLBLOCKSIZE set entry point.
395 */
396 int
397 zvol_set_volblocksize(const char *name, uint64_t volblocksize)
398 {
399 zvol_state_t *zv;
400 dmu_tx_t *tx;
401 int error;
402
403 mutex_enter(&zvol_state_lock);
404
405 zv = zvol_find_by_name(name);
406 if (zv == NULL) {
407 error = SET_ERROR(ENXIO);
408 goto out;
409 }
410
411 if (zv->zv_flags & ZVOL_RDONLY) {
412 error = SET_ERROR(EROFS);
413 goto out;
414 }
415
416 tx = dmu_tx_create(zv->zv_objset);
417 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
418 error = dmu_tx_assign(tx, TXG_WAIT);
419 if (error) {
420 dmu_tx_abort(tx);
421 } else {
422 error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
423 volblocksize, 0, tx);
424 if (error == ENOTSUP)
425 error = SET_ERROR(EBUSY);
426 dmu_tx_commit(tx);
427 if (error == 0)
428 zv->zv_volblocksize = volblocksize;
429 }
430 out:
431 mutex_exit(&zvol_state_lock);
432
433 return (SET_ERROR(error));
434 }
435
436 /*
437 * Replay a TX_WRITE ZIL transaction that didn't get committed
438 * after a system failure.
439 */
440 static int
441 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
442 {
443 objset_t *os = zv->zv_objset;
444 char *data = (char *)(lr + 1); /* data follows lr_write_t */
445 uint64_t off = lr->lr_offset;
446 uint64_t len = lr->lr_length;
447 dmu_tx_t *tx;
448 int error;
449
450 if (byteswap)
451 byteswap_uint64_array(lr, sizeof (*lr));
452
453 tx = dmu_tx_create(os);
454 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
455 error = dmu_tx_assign(tx, TXG_WAIT);
456 if (error) {
457 dmu_tx_abort(tx);
458 } else {
459 dmu_write(os, ZVOL_OBJ, off, len, data, tx);
460 dmu_tx_commit(tx);
461 }
462
463 return (SET_ERROR(error));
464 }
465
466 static int
467 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
468 {
469 return (SET_ERROR(ENOTSUP));
470 }
471
472 /*
473 * Callback vectors for replaying records.
474 * Only TX_WRITE is needed for zvol.
475 */
476 zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
477 (zil_replay_func_t)zvol_replay_err, /* no such transaction type */
478 (zil_replay_func_t)zvol_replay_err, /* TX_CREATE */
479 (zil_replay_func_t)zvol_replay_err, /* TX_MKDIR */
480 (zil_replay_func_t)zvol_replay_err, /* TX_MKXATTR */
481 (zil_replay_func_t)zvol_replay_err, /* TX_SYMLINK */
482 (zil_replay_func_t)zvol_replay_err, /* TX_REMOVE */
483 (zil_replay_func_t)zvol_replay_err, /* TX_RMDIR */
484 (zil_replay_func_t)zvol_replay_err, /* TX_LINK */
485 (zil_replay_func_t)zvol_replay_err, /* TX_RENAME */
486 (zil_replay_func_t)zvol_replay_write, /* TX_WRITE */
487 (zil_replay_func_t)zvol_replay_err, /* TX_TRUNCATE */
488 (zil_replay_func_t)zvol_replay_err, /* TX_SETATTR */
489 (zil_replay_func_t)zvol_replay_err, /* TX_ACL */
490 };
491
492 /*
493 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
494 *
495 * We store data in the log buffers if it's small enough.
496 * Otherwise we will later flush the data out via dmu_sync().
497 */
498 ssize_t zvol_immediate_write_sz = 32768;
499
500 static void
501 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
502 uint64_t size, int sync)
503 {
504 uint32_t blocksize = zv->zv_volblocksize;
505 zilog_t *zilog = zv->zv_zilog;
506 boolean_t slogging;
507 ssize_t immediate_write_sz;
508
509 if (zil_replaying(zilog, tx))
510 return;
511
512 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
513 ? 0 : zvol_immediate_write_sz;
514 slogging = spa_has_slogs(zilog->zl_spa) &&
515 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
516
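	/*
	 * Each chunk is logged either indirectly (WR_INDIRECT: the data is
	 * synced via dmu_sync() and only a block pointer is logged) or
	 * immediately (the data is stored in the log record, copied now
	 * for WR_COPIED or at commit time for WR_NEED_COPY).
	 */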
517 while (size) {
518 itx_t *itx;
519 lr_write_t *lr;
520 ssize_t len;
521 itx_wr_state_t write_state;
522
523 /*
524 * Unlike zfs_log_write() we can be called with
525 * up to DMU_MAX_ACCESS/2 (5MB) writes.
526 */
527 if (blocksize > immediate_write_sz && !slogging &&
528 size >= blocksize && offset % blocksize == 0) {
529 write_state = WR_INDIRECT; /* uses dmu_sync */
530 len = blocksize;
531 } else if (sync) {
532 write_state = WR_COPIED;
533 len = MIN(ZIL_MAX_LOG_DATA, size);
534 } else {
535 write_state = WR_NEED_COPY;
536 len = MIN(ZIL_MAX_LOG_DATA, size);
537 }
538
539 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
540 (write_state == WR_COPIED ? len : 0));
541 lr = (lr_write_t *)&itx->itx_lr;
542 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
543 ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
544 zil_itx_destroy(itx);
545 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
546 lr = (lr_write_t *)&itx->itx_lr;
547 write_state = WR_NEED_COPY;
548 }
549
550 itx->itx_wr_state = write_state;
551 if (write_state == WR_NEED_COPY)
552 itx->itx_sod += len;
553 lr->lr_foid = ZVOL_OBJ;
554 lr->lr_offset = offset;
555 lr->lr_length = len;
556 lr->lr_blkoff = 0;
557 BP_ZERO(&lr->lr_blkptr);
558
559 itx->itx_private = zv;
560 itx->itx_sync = sync;
561
562 (void) zil_itx_assign(zilog, itx, tx);
563
564 offset += len;
565 size -= len;
566 }
567 }
568
569 /*
570 * Common write path running under the zvol taskq context. This function
571 * is responsible for copying the request structure data into the DMU and
572 * signaling the request queue with the result of the copy.
573 */
574 static void
575 zvol_write(void *arg)
576 {
577 struct request *req = (struct request *)arg;
578 struct request_queue *q = req->q;
579 zvol_state_t *zv = q->queuedata;
580 fstrans_cookie_t cookie = spl_fstrans_mark();
581 uint64_t offset = blk_rq_pos(req) << 9;
582 uint64_t size = blk_rq_bytes(req);
583 int error = 0;
584 dmu_tx_t *tx;
585 rl_t *rl;
586
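	/*
	 * A flush (barrier) request: commit previously written data in the
	 * ZIL to stable storage before servicing the rest of the request.
	 */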
587 if (req->cmd_flags & VDEV_REQ_FLUSH)
588 zil_commit(zv->zv_zilog, ZVOL_OBJ);
589
590 /*
591 * Some requests are just for flush and nothing else.
592 */
593 if (size == 0) {
594 error = 0;
595 goto out;
596 }
597
598 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
599
600 tx = dmu_tx_create(zv->zv_objset);
601 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
602
603 /* This will only fail for ENOSPC */
604 error = dmu_tx_assign(tx, TXG_WAIT);
605 if (error) {
606 dmu_tx_abort(tx);
607 zfs_range_unlock(rl);
608 goto out;
609 }
610
611 error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
612 if (error == 0)
613 zvol_log_write(zv, tx, offset, size,
614 req->cmd_flags & VDEV_REQ_FUA);
615
616 dmu_tx_commit(tx);
617 zfs_range_unlock(rl);
618
619 if ((req->cmd_flags & VDEV_REQ_FUA) ||
620 zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
621 zil_commit(zv->zv_zilog, ZVOL_OBJ);
622
623 out:
624 blk_end_request(req, -error, size);
625 spl_fstrans_unmark(cookie);
626 }
627
628 #ifdef HAVE_BLK_QUEUE_DISCARD
629 static void
630 zvol_discard(void *arg)
631 {
632 struct request *req = (struct request *)arg;
633 struct request_queue *q = req->q;
634 zvol_state_t *zv = q->queuedata;
635 fstrans_cookie_t cookie = spl_fstrans_mark();
636 uint64_t start = blk_rq_pos(req) << 9;
637 uint64_t end = start + blk_rq_bytes(req);
638 int error;
639 rl_t *rl;
640
641 if (end > zv->zv_volsize) {
642 error = EIO;
643 goto out;
644 }
645
646 /*
647 * Align the request to volume block boundaries. If we don't,
648 * then this will force dnode_free_range() to zero out the
649 * unaligned parts, which is slow (read-modify-write) and
650 * useless since we are not freeing any space by doing so.
651 */
652 start = P2ROUNDUP(start, zv->zv_volblocksize);
653 end = P2ALIGN(end, zv->zv_volblocksize);
654
655 if (start >= end) {
656 error = 0;
657 goto out;
658 }
659
660 rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);
661
662 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);
663
664 /*
665 * TODO: maybe we should add the operation to the log.
666 */
667
668 zfs_range_unlock(rl);
669 out:
670 blk_end_request(req, -error, blk_rq_bytes(req));
671 spl_fstrans_unmark(cookie);
672 }
673 #endif /* HAVE_BLK_QUEUE_DISCARD */
674
675 /*
676 * Common read path running under the zvol taskq context. This function
677 * is responsible for copying the requested data out of the DMU and into
678 * a Linux request structure. It then must signal the request queue with
679 * an error code describing the result of the copy.
680 */
681 static void
682 zvol_read(void *arg)
683 {
684 struct request *req = (struct request *)arg;
685 struct request_queue *q = req->q;
686 zvol_state_t *zv = q->queuedata;
687 fstrans_cookie_t cookie = spl_fstrans_mark();
688 uint64_t offset = blk_rq_pos(req) << 9;
689 uint64_t size = blk_rq_bytes(req);
690 int error;
691 rl_t *rl;
692
693 if (size == 0) {
694 error = 0;
695 goto out;
696 }
697
698 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
699
700 error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);
701
702 zfs_range_unlock(rl);
703
704 /* convert checksum errors into IO errors */
705 if (error == ECKSUM)
706 error = SET_ERROR(EIO);
707
708 out:
709 blk_end_request(req, -error, size);
710 spl_fstrans_unmark(cookie);
711 }
712
713 /*
714 * The request will be added back to the request queue and retried if
715 * it cannot be immediately dispatched to the taskq for handling.
716 */
717 static inline void
718 zvol_dispatch(task_func_t func, struct request *req)
719 {
720 if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
721 blk_requeue_request(req->q, req);
722 }
723
724 /*
725 * Common request path. Rather than registering a custom make_request()
726 * function we use the generic Linux version. This is done because it allows
727 * us to easily merge read requests which would otherwise be performed
728 * synchronously by the DMU. This is less critical in the write case where
729 * the DMU will perform the correct merging within a transaction group. Using
730 * the generic make_request() also lets us leverage the fact that the
731 * elevator will ensure correct ordering with regard to barrier IOs. On
732 * the downside it means that in the write case we end up doing request
733 * merging twice: once in the elevator and once in the DMU.
734 *
735 * The request handler is called under a spin lock so all the real work
736 * is handed off to be done in the context of the zvol taskq. This function
737 * simply performs basic request sanity checking and hands off the request.
738 */
739 static void
740 zvol_request(struct request_queue *q)
741 {
742 zvol_state_t *zv = q->queuedata;
743 struct request *req;
744 unsigned int size;
745
746 while ((req = blk_fetch_request(q)) != NULL) {
747 size = blk_rq_bytes(req);
748
749 if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
750 get_capacity(zv->zv_disk)) {
751 printk(KERN_INFO
752 "%s: bad access: block=%llu, count=%lu\n",
753 req->rq_disk->disk_name,
754 (long long unsigned)blk_rq_pos(req),
755 (long unsigned)blk_rq_sectors(req));
756 __blk_end_request(req, -EIO, size);
757 continue;
758 }
759
760 if (!blk_fs_request(req)) {
761 printk(KERN_INFO "%s: non-fs cmd\n",
762 req->rq_disk->disk_name);
763 __blk_end_request(req, -EIO, size);
764 continue;
765 }
766
767 switch (rq_data_dir(req)) {
768 case READ:
769 zvol_dispatch(zvol_read, req);
770 break;
771 case WRITE:
772 if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
773 __blk_end_request(req, -EROFS, size);
774 break;
775 }
776
777 #ifdef HAVE_BLK_QUEUE_DISCARD
778 if (req->cmd_flags & VDEV_REQ_DISCARD) {
779 zvol_dispatch(zvol_discard, req);
780 break;
781 }
782 #endif /* HAVE_BLK_QUEUE_DISCARD */
783
784 zvol_dispatch(zvol_write, req);
785 break;
786 default:
787 printk(KERN_INFO "%s: unknown cmd: %d\n",
788 req->rq_disk->disk_name, (int)rq_data_dir(req));
789 __blk_end_request(req, -EIO, size);
790 break;
791 }
792 }
793 }
794
795 static void
796 zvol_get_done(zgd_t *zgd, int error)
797 {
798 if (zgd->zgd_db)
799 dmu_buf_rele(zgd->zgd_db, zgd);
800
801 zfs_range_unlock(zgd->zgd_rl);
802
803 if (error == 0 && zgd->zgd_bp)
804 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
805
806 kmem_free(zgd, sizeof (zgd_t));
807 }
808
809 /*
810 * Get data to generate a TX_WRITE intent log record.
811 */
812 static int
813 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
814 {
815 zvol_state_t *zv = arg;
816 objset_t *os = zv->zv_objset;
817 uint64_t object = ZVOL_OBJ;
818 uint64_t offset = lr->lr_offset;
819 uint64_t size = lr->lr_length;
820 blkptr_t *bp = &lr->lr_blkptr;
821 dmu_buf_t *db;
822 zgd_t *zgd;
823 int error;
824
825 ASSERT(zio != NULL);
826 ASSERT(size != 0);
827
828 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
829 zgd->zgd_zilog = zv->zv_zilog;
830 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
831
832 /*
833 * Write records come in two flavors: immediate and indirect.
834 * For small writes it's cheaper to store the data with the
835 * log record (immediate); for large writes it's cheaper to
836 * sync the data and get a pointer to it (indirect) so that
837 * we don't have to write the data twice.
838 */
839 if (buf != NULL) { /* immediate write */
840 error = dmu_read(os, object, offset, size, buf,
841 DMU_READ_NO_PREFETCH);
842 } else {
843 size = zv->zv_volblocksize;
844 offset = P2ALIGN_TYPED(offset, size, uint64_t);
845 error = dmu_buf_hold(os, object, offset, zgd, &db,
846 DMU_READ_NO_PREFETCH);
847 if (error == 0) {
848 blkptr_t *obp = dmu_buf_get_blkptr(db);
849 if (obp) {
850 ASSERT(BP_IS_HOLE(bp));
851 *bp = *obp;
852 }
853
854 zgd->zgd_db = db;
855 zgd->zgd_bp = &lr->lr_blkptr;
856
857 ASSERT(db != NULL);
858 ASSERT(db->db_offset == offset);
859 ASSERT(db->db_size == size);
860
861 error = dmu_sync(zio, lr->lr_common.lrc_txg,
862 zvol_get_done, zgd);
863
864 if (error == 0)
865 return (0);
866 }
867 }
868
869 zvol_get_done(zgd, error);
870
871 return (SET_ERROR(error));
872 }
873
874 /*
875 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
876 */
877 static void
878 zvol_insert(zvol_state_t *zv_insert)
879 {
880 zvol_state_t *zv = NULL;
881
882 ASSERT(MUTEX_HELD(&zvol_state_lock));
883 ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
884 for (zv = list_head(&zvol_state_list); zv != NULL;
885 zv = list_next(&zvol_state_list, zv)) {
886 if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
887 break;
888 }
889
890 list_insert_before(&zvol_state_list, zv, zv_insert);
891 }
892
893 /*
894 * Simply remove the zvol from the list of zvols.
895 */
896 static void
897 zvol_remove(zvol_state_t *zv_remove)
898 {
899 ASSERT(MUTEX_HELD(&zvol_state_lock));
900 list_remove(&zvol_state_list, zv_remove);
901 }
902
903 static int
904 zvol_first_open(zvol_state_t *zv)
905 {
906 objset_t *os;
907 uint64_t volsize;
908 int locked = 0;
909 int error;
910 uint64_t ro;
911
912 /*
913 * In all other cases the spa_namespace_lock is taken before the
914 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
915 * function calls fops->open() with the bdev->bd_mutex lock held.
916 *
917 * To avoid a potential lock inversion deadlock we preemptively
918 * try to take the spa_namespace_lock(). Normally it will not
919 * be contended and this is safe because spa_open_common() handles
920 * the case where the caller already holds the spa_namespace_lock.
921 *
922 * When it is contended we risk a lock inversion if we were to
923 * block waiting for the lock. Luckily, the __blkdev_get()
924 * function allows us to return -ERESTARTSYS which will result in
925 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
926 * called again. This process can be repeated safely until both
927 * locks are acquired.
928 */
929 if (!mutex_owned(&spa_namespace_lock)) {
930 locked = mutex_tryenter(&spa_namespace_lock);
931 if (!locked)
932 return (-SET_ERROR(ERESTARTSYS));
933 }
934
935 /* lie and say we're read-only */
936 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
937 if (error)
938 goto out_mutex;
939
940 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
941 if (error) {
942 dmu_objset_disown(os, zvol_tag);
943 goto out_mutex;
944 }
945
946 zv->zv_objset = os;
947 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
948 if (error) {
949 dmu_objset_disown(os, zvol_tag);
950 goto out_mutex;
951 }
952
953 set_capacity(zv->zv_disk, volsize >> 9);
954 zv->zv_volsize = volsize;
955 zv->zv_zilog = zil_open(os, zvol_get_data);
956
957 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
958 if (ro || dmu_objset_is_snapshot(os) ||
959 !spa_writeable(dmu_objset_spa(os))) {
960 set_disk_ro(zv->zv_disk, 1);
961 zv->zv_flags |= ZVOL_RDONLY;
962 } else {
963 set_disk_ro(zv->zv_disk, 0);
964 zv->zv_flags &= ~ZVOL_RDONLY;
965 }
966
967 out_mutex:
968 if (locked)
969 mutex_exit(&spa_namespace_lock);
970
971 return (SET_ERROR(-error));
972 }
973
974 static void
975 zvol_last_close(zvol_state_t *zv)
976 {
977 zil_close(zv->zv_zilog);
978 zv->zv_zilog = NULL;
979
980 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
981 zv->zv_dbuf = NULL;
982
983 /*
984 * Evict cached data
985 */
986 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
987 !(zv->zv_flags & ZVOL_RDONLY))
988 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
989 (void) dmu_objset_evict_dbufs(zv->zv_objset);
990
991 dmu_objset_disown(zv->zv_objset, zvol_tag);
992 zv->zv_objset = NULL;
993 }
994
995 static int
996 zvol_open(struct block_device *bdev, fmode_t flag)
997 {
998 zvol_state_t *zv = bdev->bd_disk->private_data;
999 int error = 0, drop_mutex = 0;
1000
1001 /*
1002 * If the caller is already holding the mutex do not take it
1003 * again; this happens as part of zvol_create_minor().
1004 * Once add_disk() is called the device is live and the kernel
1005 * will attempt to open it to read the partition information.
1006 */
1007 if (!mutex_owned(&zvol_state_lock)) {
1008 mutex_enter(&zvol_state_lock);
1009 drop_mutex = 1;
1010 }
1011
1012 ASSERT3P(zv, !=, NULL);
1013
1014 if (zv->zv_open_count == 0) {
1015 error = zvol_first_open(zv);
1016 if (error)
1017 goto out_mutex;
1018 }
1019
1020 if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1021 error = -EROFS;
1022 goto out_open_count;
1023 }
1024
1025 zv->zv_open_count++;
1026
1027 out_open_count:
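	/*
	 * An EROFS failure above can leave the open count at zero even
	 * though zvol_first_open() succeeded; undo it here.
	 */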
1028 if (zv->zv_open_count == 0)
1029 zvol_last_close(zv);
1030
1031 out_mutex:
1032 if (drop_mutex)
1033 mutex_exit(&zvol_state_lock);
1034
1035 check_disk_change(bdev);
1036
1037 return (SET_ERROR(error));
1038 }
1039
1040 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1041 static void
1042 #else
1043 static int
1044 #endif
1045 zvol_release(struct gendisk *disk, fmode_t mode)
1046 {
1047 zvol_state_t *zv = disk->private_data;
1048 int drop_mutex = 0;
1049
1050 if (!mutex_owned(&zvol_state_lock)) {
1051 mutex_enter(&zvol_state_lock);
1052 drop_mutex = 1;
1053 }
1054
1055 ASSERT3P(zv, !=, NULL);
1056 ASSERT3U(zv->zv_open_count, >, 0);
1057 zv->zv_open_count--;
1058 if (zv->zv_open_count == 0)
1059 zvol_last_close(zv);
1060
1061 if (drop_mutex)
1062 mutex_exit(&zvol_state_lock);
1063
1064 #ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1065 return (0);
1066 #endif
1067 }
1068
1069 static int
1070 zvol_ioctl(struct block_device *bdev, fmode_t mode,
1071 unsigned int cmd, unsigned long arg)
1072 {
1073 zvol_state_t *zv = bdev->bd_disk->private_data;
1074 int error = 0;
1075
1076 if (zv == NULL)
1077 return (SET_ERROR(-ENXIO));
1078
1079 switch (cmd) {
1080 case BLKFLSBUF:
1081 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1082 break;
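	/* BLKZNAME returns the name of the backing dataset to user space. */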
1083 case BLKZNAME:
1084 error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
1085 break;
1086
1087 default:
1088 error = -ENOTTY;
1089 break;
1090
1091 }
1092
1093 return (SET_ERROR(error));
1094 }
1095
1096 #ifdef CONFIG_COMPAT
1097 static int
1098 zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
1099 unsigned cmd, unsigned long arg)
1100 {
1101 return (zvol_ioctl(bdev, mode, cmd, arg));
1102 }
1103 #else
1104 #define zvol_compat_ioctl NULL
1105 #endif
1106
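/*
 * Disk change callbacks. zvol_size_changed() marks the volume as changed;
 * revalidation clears the flag and publishes the new capacity to the kernel.
 */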
1107 static int zvol_media_changed(struct gendisk *disk)
1108 {
1109 zvol_state_t *zv = disk->private_data;
1110
1111 return (zv->zv_changed);
1112 }
1113
1114 static int zvol_revalidate_disk(struct gendisk *disk)
1115 {
1116 zvol_state_t *zv = disk->private_data;
1117
1118 zv->zv_changed = 0;
1119 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1120
1121 return (0);
1122 }
1123
1124 /*
1125 * Provide a simple virtual geometry for legacy compatibility. For devices
1126 * smaller than 1 MiB a small head and sector count is used to allow very
1127 * tiny devices. For devices over 1 MiB a standard head and sector count
1128 * is used to keep the cylinder count reasonable.
1129 */
1130 static int
1131 zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1132 {
1133 zvol_state_t *zv = bdev->bd_disk->private_data;
1134 sector_t sectors = get_capacity(zv->zv_disk);
1135
1136 if (sectors > 2048) {
1137 geo->heads = 16;
1138 geo->sectors = 63;
1139 } else {
1140 geo->heads = 2;
1141 geo->sectors = 4;
1142 }
1143
1144 geo->start = 0;
1145 geo->cylinders = sectors / (geo->heads * geo->sectors);
1146
1147 return (0);
1148 }
1149
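/*
 * Callback registered via blk_register_region(). Look up the zvol for the
 * requested dev_t and return a referenced kobject so the kernel can
 * instantiate the device node on demand.
 */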
1150 static struct kobject *
1151 zvol_probe(dev_t dev, int *part, void *arg)
1152 {
1153 zvol_state_t *zv;
1154 struct kobject *kobj;
1155
1156 mutex_enter(&zvol_state_lock);
1157 zv = zvol_find_by_dev(dev);
1158 kobj = zv ? get_disk(zv->zv_disk) : NULL;
1159 mutex_exit(&zvol_state_lock);
1160
1161 return (kobj);
1162 }
1163
1164 #ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
1165 static struct block_device_operations zvol_ops = {
1166 .open = zvol_open,
1167 .release = zvol_release,
1168 .ioctl = zvol_ioctl,
1169 .compat_ioctl = zvol_compat_ioctl,
1170 .media_changed = zvol_media_changed,
1171 .revalidate_disk = zvol_revalidate_disk,
1172 .getgeo = zvol_getgeo,
1173 .owner = THIS_MODULE,
1174 };
1175
1176 #else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1177
1178 static int
1179 zvol_open_by_inode(struct inode *inode, struct file *file)
1180 {
1181 return (zvol_open(inode->i_bdev, file->f_mode));
1182 }
1183
1184 static int
1185 zvol_release_by_inode(struct inode *inode, struct file *file)
1186 {
1187 return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
1188 }
1189
1190 static int
1191 zvol_ioctl_by_inode(struct inode *inode, struct file *file,
1192 unsigned int cmd, unsigned long arg)
1193 {
1194 if (file == NULL || inode == NULL)
1195 return (SET_ERROR(-EINVAL));
1196
1197 return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
1198 }
1199
1200 #ifdef CONFIG_COMPAT
1201 static long
1202 zvol_compat_ioctl_by_inode(struct file *file,
1203 unsigned int cmd, unsigned long arg)
1204 {
1205 if (file == NULL)
1206 return (SET_ERROR(-EINVAL));
1207
1208 return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
1209 file->f_mode, cmd, arg));
1210 }
1211 #else
1212 #define zvol_compat_ioctl_by_inode NULL
1213 #endif
1214
1215 static struct block_device_operations zvol_ops = {
1216 .open = zvol_open_by_inode,
1217 .release = zvol_release_by_inode,
1218 .ioctl = zvol_ioctl_by_inode,
1219 .compat_ioctl = zvol_compat_ioctl_by_inode,
1220 .media_changed = zvol_media_changed,
1221 .revalidate_disk = zvol_revalidate_disk,
1222 .getgeo = zvol_getgeo,
1223 .owner = THIS_MODULE,
1224 };
1225 #endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1226
1227 /*
1228 * Allocate memory for a new zvol_state_t and set up the required
1229 * request queue and generic disk structures for the block device.
1230 */
1231 static zvol_state_t *
1232 zvol_alloc(dev_t dev, const char *name)
1233 {
1234 zvol_state_t *zv;
1235 int error = 0;
1236
1237 zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
1238
1239 spin_lock_init(&zv->zv_lock);
1240 list_link_init(&zv->zv_next);
1241
1242 zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
1243 if (zv->zv_queue == NULL)
1244 goto out_kmem;
1245
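	/*
	 * The DMU already schedules zvol I/O, so the simple noop elevator
	 * is preferred when the kernel allows it to be changed.
	 */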
1246 #ifdef HAVE_ELEVATOR_CHANGE
1247 error = elevator_change(zv->zv_queue, "noop");
1248 #endif /* HAVE_ELEVATOR_CHANGE */
1249 if (error) {
1250 printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
1251 "noop", name, error);
1252 goto out_queue;
1253 }
1254
1255 #ifdef HAVE_BLK_QUEUE_FLUSH
1256 blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
1257 #else
1258 blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
1259 #endif /* HAVE_BLK_QUEUE_FLUSH */
1260
1261 zv->zv_disk = alloc_disk(ZVOL_MINORS);
1262 if (zv->zv_disk == NULL)
1263 goto out_queue;
1264
1265 zv->zv_queue->queuedata = zv;
1266 zv->zv_dev = dev;
1267 zv->zv_open_count = 0;
1268 strlcpy(zv->zv_name, name, MAXNAMELEN);
1269
1270 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
1271 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
1272 sizeof (rl_t), offsetof(rl_t, r_node));
1273 zv->zv_znode.z_is_zvol = TRUE;
1274
1275 zv->zv_disk->major = zvol_major;
1276 zv->zv_disk->first_minor = (dev & MINORMASK);
1277 zv->zv_disk->fops = &zvol_ops;
1278 zv->zv_disk->private_data = zv;
1279 zv->zv_disk->queue = zv->zv_queue;
1280 snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
1281 ZVOL_DEV_NAME, (dev & MINORMASK));
1282
1283 return (zv);
1284
1285 out_queue:
1286 blk_cleanup_queue(zv->zv_queue);
1287 out_kmem:
1288 kmem_free(zv, sizeof (zvol_state_t));
1289
1290 return (NULL);
1291 }
1292
1293 /*
1294 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
1295 */
1296 static void
1297 zvol_free(zvol_state_t *zv)
1298 {
1299 avl_destroy(&zv->zv_znode.z_range_avl);
1300 mutex_destroy(&zv->zv_znode.z_range_lock);
1301
1302 del_gendisk(zv->zv_disk);
1303 blk_cleanup_queue(zv->zv_queue);
1304 put_disk(zv->zv_disk);
1305
1306 kmem_free(zv, sizeof (zvol_state_t));
1307 }
1308
1309 static int
1310 __zvol_snapdev_hidden(const char *name)
1311 {
1312 uint64_t snapdev;
1313 char *parent;
1314 char *atp;
1315 int error = 0;
1316
1317 parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
1318 (void) strlcpy(parent, name, MAXPATHLEN);
1319
1320 if ((atp = strrchr(parent, '@')) != NULL) {
1321 *atp = '\0';
1322 error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
1323 if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
1324 error = SET_ERROR(ENODEV);
1325 }
1326
1327 kmem_free(parent, MAXPATHLEN);
1328
1329 return (SET_ERROR(error));
1330 }
1331
1332 static int
1333 __zvol_create_minor(const char *name, boolean_t ignore_snapdev)
1334 {
1335 zvol_state_t *zv;
1336 objset_t *os;
1337 dmu_object_info_t *doi;
1338 uint64_t volsize;
1339 unsigned minor = 0;
1340 int error = 0;
1341
1342 ASSERT(MUTEX_HELD(&zvol_state_lock));
1343
1344 zv = zvol_find_by_name(name);
1345 if (zv) {
1346 error = SET_ERROR(EEXIST);
1347 goto out;
1348 }
1349
1350 if (ignore_snapdev == B_FALSE) {
1351 error = __zvol_snapdev_hidden(name);
1352 if (error)
1353 goto out;
1354 }
1355
1356 doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
1357
1358 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
1359 if (error)
1360 goto out_doi;
1361
1362 error = dmu_object_info(os, ZVOL_OBJ, doi);
1363 if (error)
1364 goto out_dmu_objset_disown;
1365
1366 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1367 if (error)
1368 goto out_dmu_objset_disown;
1369
1370 error = zvol_find_minor(&minor);
1371 if (error)
1372 goto out_dmu_objset_disown;
1373
1374 zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1375 if (zv == NULL) {
1376 error = SET_ERROR(EAGAIN);
1377 goto out_dmu_objset_disown;
1378 }
1379
1380 if (dmu_objset_is_snapshot(os))
1381 zv->zv_flags |= ZVOL_RDONLY;
1382
1383 zv->zv_volblocksize = doi->doi_data_block_size;
1384 zv->zv_volsize = volsize;
1385 zv->zv_objset = os;
1386
1387 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1388
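	/*
	 * Publish queue limits; the physical block size and optimal I/O
	 * size match the volume block size so callers issue aligned requests.
	 */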
1389 blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
1390 blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
1391 blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
1392 blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
1393 blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
1394 #ifdef HAVE_BLK_QUEUE_DISCARD
1395 blk_queue_max_discard_sectors(zv->zv_queue,
1396 (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
1397 blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
1398 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
1399 #endif
1400 #ifdef HAVE_BLK_QUEUE_NONROT
1401 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
1402 #endif
1403
1404 if (spa_writeable(dmu_objset_spa(os))) {
1405 if (zil_replay_disable)
1406 zil_destroy(dmu_objset_zil(os), B_FALSE);
1407 else
1408 zil_replay(os, zv, zvol_replay_vector);
1409 }
1410
1411 zv->zv_objset = NULL;
1412 out_dmu_objset_disown:
1413 dmu_objset_disown(os, zvol_tag);
1414 out_doi:
1415 kmem_free(doi, sizeof (dmu_object_info_t));
1416 out:
1417
1418 if (error == 0) {
1419 zvol_insert(zv);
1420 add_disk(zv->zv_disk);
1421 }
1422
1423 return (SET_ERROR(error));
1424 }
1425
1426 /*
1427 * Create a block device minor node and set up the linkage between it
1428 * and the specified volume. Once this function returns the block
1429 * device is live and ready for use.
1430 */
1431 int
1432 zvol_create_minor(const char *name)
1433 {
1434 int error;
1435
1436 mutex_enter(&zvol_state_lock);
1437 error = __zvol_create_minor(name, B_FALSE);
1438 mutex_exit(&zvol_state_lock);
1439
1440 return (SET_ERROR(error));
1441 }
1442
1443 static int
1444 __zvol_remove_minor(const char *name)
1445 {
1446 zvol_state_t *zv;
1447
1448 ASSERT(MUTEX_HELD(&zvol_state_lock));
1449
1450 zv = zvol_find_by_name(name);
1451 if (zv == NULL)
1452 return (SET_ERROR(ENXIO));
1453
1454 if (zv->zv_open_count > 0)
1455 return (SET_ERROR(EBUSY));
1456
1457 zvol_remove(zv);
1458 zvol_free(zv);
1459
1460 return (0);
1461 }
1462
1463 /*
1464 * Remove a block device minor node for the specified volume.
1465 */
1466 int
1467 zvol_remove_minor(const char *name)
1468 {
1469 int error;
1470
1471 mutex_enter(&zvol_state_lock);
1472 error = __zvol_remove_minor(name);
1473 mutex_exit(&zvol_state_lock);
1474
1475 return (SET_ERROR(error));
1476 }
1477
1478 /*
1479 * Rename a block device minor node for the specified volume.
1480 */
1481 static void
1482 __zvol_rename_minor(zvol_state_t *zv, const char *newname)
1483 {
1484 int readonly = get_disk_ro(zv->zv_disk);
1485
1486 ASSERT(MUTEX_HELD(&zvol_state_lock));
1487
1488 strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
1489
1490 /*
1491 * The block device's read-only state is briefly changed causing
1492 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
1493 * the name change and fixes the symlinks. This does not change
1494 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
1495 * changes. This would normally be done using kobject_uevent() but
1496 * that is a GPL-only symbol which is why we need this workaround.
1497 */
1498 set_disk_ro(zv->zv_disk, !readonly);
1499 set_disk_ro(zv->zv_disk, readonly);
1500 }
1501
1502 static int
1503 zvol_create_minors_cb(const char *dsname, void *arg)
1504 {
1505 (void) zvol_create_minor(dsname);
1506
1507 return (0);
1508 }
1509
1510 /*
1511 * Create minors for specified dataset including children and snapshots.
1512 */
1513 int
1514 zvol_create_minors(const char *name)
1515 {
1516 int error = 0;
1517
1518 if (!zvol_inhibit_dev)
1519 error = dmu_objset_find((char *)name, zvol_create_minors_cb,
1520 NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1521
1522 return (SET_ERROR(error));
1523 }
1524
1525 /*
1526 * Remove minors for specified dataset including children and snapshots.
1527 */
1528 void
1529 zvol_remove_minors(const char *name)
1530 {
1531 zvol_state_t *zv, *zv_next;
1532 int namelen = ((name) ? strlen(name) : 0);
1533
1534 if (zvol_inhibit_dev)
1535 return;
1536
1537 mutex_enter(&zvol_state_lock);
1538
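	/*
	 * Remove the named dataset and any descendants (names continuing
	 * with '/'); a NULL name removes every zvol.
	 */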
1539 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1540 zv_next = list_next(&zvol_state_list, zv);
1541
1542 if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
1543 (strncmp(zv->zv_name, name, namelen) == 0 &&
1544 zv->zv_name[namelen] == '/')) {
1545 zvol_remove(zv);
1546 zvol_free(zv);
1547 }
1548 }
1549
1550 mutex_exit(&zvol_state_lock);
1551 }
1552
1553 /*
1554 * Rename minors for specified dataset including children and snapshots.
1555 */
1556 void
1557 zvol_rename_minors(const char *oldname, const char *newname)
1558 {
1559 zvol_state_t *zv, *zv_next;
1560 int oldnamelen, newnamelen;
1561 char *name;
1562
1563 if (zvol_inhibit_dev)
1564 return;
1565
1566 oldnamelen = strlen(oldname);
1567 newnamelen = strlen(newname);
1568 name = kmem_alloc(MAXNAMELEN, KM_SLEEP);
1569
1570 mutex_enter(&zvol_state_lock);
1571
1572 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1573 zv_next = list_next(&zvol_state_list, zv);
1574
1575 if (strcmp(zv->zv_name, oldname) == 0) {
1576 __zvol_rename_minor(zv, newname);
1577 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
1578 (zv->zv_name[oldnamelen] == '/' ||
1579 zv->zv_name[oldnamelen] == '@')) {
1580 snprintf(name, MAXNAMELEN, "%s%c%s", newname,
1581 zv->zv_name[oldnamelen],
1582 zv->zv_name + oldnamelen + 1);
1583 __zvol_rename_minor(zv, name);
1584 }
1585 }
1586
1587 mutex_exit(&zvol_state_lock);
1588
1589 kmem_free(name, MAXNAMELEN);
1590 }
1591
1592 static int
1593 snapdev_snapshot_changed_cb(const char *dsname, void *arg) {
1594 uint64_t snapdev = *(uint64_t *) arg;
1595
1596 if (strchr(dsname, '@') == NULL)
1597 return (0);
1598
1599 switch (snapdev) {
1600 case ZFS_SNAPDEV_VISIBLE:
1601 mutex_enter(&zvol_state_lock);
1602 (void) __zvol_create_minor(dsname, B_TRUE);
1603 mutex_exit(&zvol_state_lock);
1604 break;
1605 case ZFS_SNAPDEV_HIDDEN:
1606 (void) zvol_remove_minor(dsname);
1607 break;
1608 }
1609
1610 return (0);
1611 }
1612
1613 int
1614 zvol_set_snapdev(const char *dsname, uint64_t snapdev) {
1615 (void) dmu_objset_find((char *) dsname, snapdev_snapshot_changed_cb,
1616 &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
1617 /* caller should continue to modify snapdev property */
1618 return (-1);
1619 }
1620
1621 int
1622 zvol_init(void)
1623 {
1624 int error;
1625
1626 list_create(&zvol_state_list, sizeof (zvol_state_t),
1627 offsetof(zvol_state_t, zv_next));
1628
1629 mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1630
1631 zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
1632 zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
1633 if (zvol_taskq == NULL) {
1634 printk(KERN_INFO "ZFS: taskq_create() failed\n");
1635 error = -ENOMEM;
1636 goto out1;
1637 }
1638
1639 error = register_blkdev(zvol_major, ZVOL_DRIVER);
1640 if (error) {
1641 printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
1642 goto out2;
1643 }
1644
1645 blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
1646 THIS_MODULE, zvol_probe, NULL, NULL);
1647
1648 return (0);
1649
1650 out2:
1651 taskq_destroy(zvol_taskq);
1652 out1:
1653 mutex_destroy(&zvol_state_lock);
1654 list_destroy(&zvol_state_list);
1655
1656 return (SET_ERROR(error));
1657 }
1658
1659 void
1660 zvol_fini(void)
1661 {
1662 zvol_remove_minors(NULL);
1663 blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
1664 unregister_blkdev(zvol_major, ZVOL_DRIVER);
1665 taskq_destroy(zvol_taskq);
1666 mutex_destroy(&zvol_state_lock);
1667 list_destroy(&zvol_state_list);
1668 }
1669
1670 module_param(zvol_inhibit_dev, uint, 0644);
1671 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1672
1673 module_param(zvol_major, uint, 0444);
1674 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1675
1676 module_param(zvol_threads, uint, 0444);
1677 MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");
1678
1679 module_param(zvol_max_discard_blocks, ulong, 0444);
1680 MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");