/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 *
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 *   /dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load. No user command
 * needs to be run before opening and using a device.
 */
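
/*
 * Illustrative only (not built into the module): a minimal user-space
 * sketch showing that a zvol node can be opened and read like any other
 * block device as soon as it exists. The volume name "tank/vol" is a
 * hypothetical example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    char buf[512];
    int fd = open("/dev/tank/vol", O_RDONLY);

    if (fd < 0 || read(fd, buf, sizeof (buf)) != sizeof (buf)) {
        perror("/dev/tank/vol");
        return (1);
    }
    (void) close(fd);
    return (0);
}
#endif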

#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/zap.h>
#include <sys/zil_impl.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <linux/blkdev_compat.h>

unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_threads = 32;
unsigned long zvol_max_discard_blocks = 16384;

static taskq_t *zvol_taskq;
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
static char *zvol_tag = "zvol_tag";

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
    char zv_name[MAXNAMELEN];       /* name */
    uint64_t zv_volsize;            /* advertised space */
    uint64_t zv_volblocksize;       /* volume block size */
    objset_t *zv_objset;            /* objset handle */
    uint32_t zv_flags;              /* ZVOL_* flags */
    uint32_t zv_open_count;         /* open counts */
    uint32_t zv_changed;            /* disk changed */
    zilog_t *zv_zilog;              /* ZIL handle */
    znode_t zv_znode;               /* for range locking */
    dmu_buf_t *zv_dbuf;             /* bonus handle */
    dev_t zv_dev;                   /* device id */
    struct gendisk *zv_disk;        /* generic disk */
    struct request_queue *zv_queue; /* request queue */
    spinlock_t zv_lock;             /* request queue lock */
    list_node_t zv_next;            /* next zvol_state_t linkage */
} zvol_state_t;

#define ZVOL_RDONLY 0x1

/*
 * Find the next available range of ZVOL_MINORS minor numbers. The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence. This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
    zvol_state_t *zv;

    *minor = 0;
    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
        if (MINOR(zv->zv_dev) != MINOR(*minor))
            break;
    }

    /* All minors are in use */
    if (*minor >= (1 << MINORBITS))
        return (SET_ERROR(ENXIO));

    return (0);
}

/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (zv->zv_dev == dev)
            return (zv);
    }

    return (NULL);
}

/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (strncmp(zv->zv_name, name, MAXNAMELEN) == 0)
            return (zv);
    }

    return (NULL);
}

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
    struct block_device *bdev;
    unsigned int major;

    bdev = lookup_bdev(device);
    if (IS_ERR(bdev))
        return (B_FALSE);

    major = MAJOR(bdev->bd_dev);
    bdput(bdev);

    if (major == zvol_major)
        return (B_TRUE);

    return (B_FALSE);
}

/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
    zfs_creat_t *zct = arg;
    nvlist_t *nvprops = zct->zct_props;
    int error;
    uint64_t volblocksize, volsize;

    VERIFY(nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
    if (nvlist_lookup_uint64(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
        volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

    /*
     * These properties must be removed from the list so the generic
     * property setting step won't apply to them.
     */
    VERIFY(nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
    (void) nvlist_remove_all(nvprops,
        zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

    error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
        DMU_OT_NONE, 0, tx);
    ASSERT(error == 0);

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
    ASSERT(error == 0);
}

/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
    int error;
    dmu_object_info_t *doi;
    uint64_t val;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
    if (error)
        return (SET_ERROR(error));

    dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
    error = dmu_object_info(os, ZVOL_OBJ, doi);

    if (error == 0) {
        dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
            doi->doi_data_block_size);
    }

    kmem_free(doi, sizeof (dmu_object_info_t));

    return (SET_ERROR(error));
}

static void
zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
{
    struct block_device *bdev;

    bdev = bdget_disk(zv->zv_disk, 0);
    if (bdev == NULL)
        return;
    /*
     * 2.6.28 API change
     * Added check_disk_size_change() helper function.
     */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
    set_capacity(zv->zv_disk, volsize >> 9);
    zv->zv_volsize = volsize;
    check_disk_size_change(zv->zv_disk, bdev);
#else
    zv->zv_volsize = volsize;
    zv->zv_changed = 1;
    (void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

    bdput(bdev);
}

/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
    if (volsize == 0)
        return (SET_ERROR(EINVAL));

    if (volsize % blocksize != 0)
        return (SET_ERROR(EINVAL));

#ifdef _ILP32
    if (volsize - 1 > MAXOFFSET_T)
        return (SET_ERROR(EOVERFLOW));
#endif
    return (0);
}
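
/*
 * Worked example (illustrative): with an 8 KiB volblocksize, a volsize of
 * 1073741824 (1 GiB, a multiple of 8192) passes the check above, while a
 * volsize of 1000000 fails with EINVAL because 1000000 % 8192 != 0.
 */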

/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
    dmu_tx_t *tx;
    int error;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    tx = dmu_tx_create(os);
    dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        return (SET_ERROR(error));
    }

    error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
        &volsize, tx);
    dmu_tx_commit(tx);

    if (error == 0)
        error = dmu_free_long_range(os,
            ZVOL_OBJ, volsize, DMU_OBJECT_END);

    return (error);
}

static int
zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
{
    zvol_size_changed(zv, volsize);

    /*
     * We should post an event here describing the expansion. However,
     * the zfs_ereport_post() interface doesn't nicely support posting
     * events for zvols; it assumes events relate to vdevs or zios.
     */

    return (0);
}

/*
 * ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
    zvol_state_t *zv = NULL;
    objset_t *os = NULL;
    int error;
    dmu_object_info_t *doi;
    uint64_t readonly;
    boolean_t owned = B_FALSE;

    error = dsl_prop_get_integer(name,
        zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
    if (error != 0)
        return (SET_ERROR(error));
    if (readonly)
        return (SET_ERROR(EROFS));

    mutex_enter(&zvol_state_lock);
    zv = zvol_find_by_name(name);

    if (zv == NULL || zv->zv_objset == NULL) {
        if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
            FTAG, &os)) != 0) {
            mutex_exit(&zvol_state_lock);
            return (SET_ERROR(error));
        }
        owned = B_TRUE;
        if (zv != NULL)
            zv->zv_objset = os;
    } else {
        os = zv->zv_objset;
    }

    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

    if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
        (error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
        goto out;

    error = zvol_update_volsize(volsize, os);
    kmem_free(doi, sizeof (dmu_object_info_t));

    if (error == 0 && zv != NULL)
        error = zvol_update_live_volsize(zv, volsize);
out:
    if (owned) {
        dmu_objset_disown(os, FTAG);
        if (zv != NULL)
            zv->zv_objset = NULL;
    }
    mutex_exit(&zvol_state_lock);
    return (error);
}

/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(uint64_t volblocksize)
{
    if (volblocksize < SPA_MINBLOCKSIZE ||
        volblocksize > SPA_MAXBLOCKSIZE ||
        !ISP2(volblocksize))
        return (SET_ERROR(EDOM));

    return (0);
}
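
/*
 * Worked example (illustrative): valid volblocksize values are powers of
 * two between SPA_MINBLOCKSIZE and SPA_MAXBLOCKSIZE (512 bytes and 128 KiB
 * in this release), so 4096 and 65536 pass while 3000 fails with EDOM.
 */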

/*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
    zvol_state_t *zv;
    dmu_tx_t *tx;
    int error;

    mutex_enter(&zvol_state_lock);

    zv = zvol_find_by_name(name);
    if (zv == NULL) {
        error = SET_ERROR(ENXIO);
        goto out;
    }

    if (zv->zv_flags & ZVOL_RDONLY) {
        error = SET_ERROR(EROFS);
        goto out;
    }

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_bonus(tx, ZVOL_OBJ);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
            volblocksize, 0, tx);
        if (error == ENOTSUP)
            error = SET_ERROR(EBUSY);
        dmu_tx_commit(tx);
        if (error == 0)
            zv->zv_volblocksize = volblocksize;
    }
out:
    mutex_exit(&zvol_state_lock);

    return (SET_ERROR(error));
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
    objset_t *os = zv->zv_objset;
    char *data = (char *)(lr + 1);  /* data follows lr_write_t */
    uint64_t off = lr->lr_offset;
    uint64_t len = lr->lr_length;
    dmu_tx_t *tx;
    int error;

    if (byteswap)
        byteswap_uint64_array(lr, sizeof (*lr));

    tx = dmu_tx_create(os);
    dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
    } else {
        dmu_write(os, ZVOL_OBJ, off, len, data, tx);
        dmu_tx_commit(tx);
    }

    return (SET_ERROR(error));
}

static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
    return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
    (zil_replay_func_t)zvol_replay_err,    /* no such transaction type */
    (zil_replay_func_t)zvol_replay_err,    /* TX_CREATE */
    (zil_replay_func_t)zvol_replay_err,    /* TX_MKDIR */
    (zil_replay_func_t)zvol_replay_err,    /* TX_MKXATTR */
    (zil_replay_func_t)zvol_replay_err,    /* TX_SYMLINK */
    (zil_replay_func_t)zvol_replay_err,    /* TX_REMOVE */
    (zil_replay_func_t)zvol_replay_err,    /* TX_RMDIR */
    (zil_replay_func_t)zvol_replay_err,    /* TX_LINK */
    (zil_replay_func_t)zvol_replay_err,    /* TX_RENAME */
    (zil_replay_func_t)zvol_replay_write,  /* TX_WRITE */
    (zil_replay_func_t)zvol_replay_err,    /* TX_TRUNCATE */
    (zil_replay_func_t)zvol_replay_err,    /* TX_SETATTR */
    (zil_replay_func_t)zvol_replay_err,    /* TX_ACL */
};

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
    uint64_t size, int sync)
{
    uint32_t blocksize = zv->zv_volblocksize;
    zilog_t *zilog = zv->zv_zilog;
    boolean_t slogging;
    ssize_t immediate_write_sz;

    if (zil_replaying(zilog, tx))
        return;

    immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
        ? 0 : zvol_immediate_write_sz;
    slogging = spa_has_slogs(zilog->zl_spa) &&
        (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

    while (size) {
        itx_t *itx;
        lr_write_t *lr;
        ssize_t len;
        itx_wr_state_t write_state;

        /*
         * Unlike zfs_log_write() we can be called with
         * up to DMU_MAX_ACCESS/2 (5MB) writes.
         */
        if (blocksize > immediate_write_sz && !slogging &&
            size >= blocksize && offset % blocksize == 0) {
            write_state = WR_INDIRECT; /* uses dmu_sync */
            len = blocksize;
        } else if (sync) {
            write_state = WR_COPIED;
            len = MIN(ZIL_MAX_LOG_DATA, size);
        } else {
            write_state = WR_NEED_COPY;
            len = MIN(ZIL_MAX_LOG_DATA, size);
        }

        itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
            (write_state == WR_COPIED ? len : 0));
        lr = (lr_write_t *)&itx->itx_lr;
        if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
            ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
            zil_itx_destroy(itx);
            itx = zil_itx_create(TX_WRITE, sizeof (*lr));
            lr = (lr_write_t *)&itx->itx_lr;
            write_state = WR_NEED_COPY;
        }

        itx->itx_wr_state = write_state;
        if (write_state == WR_NEED_COPY)
            itx->itx_sod += len;
        lr->lr_foid = ZVOL_OBJ;
        lr->lr_offset = offset;
        lr->lr_length = len;
        lr->lr_blkoff = 0;
        BP_ZERO(&lr->lr_blkptr);

        itx->itx_private = zv;
        itx->itx_sync = sync;

        (void) zil_itx_assign(zilog, itx, tx);

        offset += len;
        size -= len;
    }
}
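
/*
 * Worked example (illustrative): for a hypothetical volume with an 8 KiB
 * volblocksize, logbias=throughput (immediate_write_sz becomes 0) and no
 * separate log device, a 20 KiB synchronous write at offset 0 is logged as
 * three records: [0, 8K) and [8K, 16K) as WR_INDIRECT (block-aligned, full
 * blocks, later flushed via dmu_sync()) and the 4 KiB tail as WR_COPIED
 * (data embedded in the itx). With the default logbias=latency and the
 * default zvol_immediate_write_sz of 32768, the same write would instead
 * be logged entirely as WR_COPIED.
 */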

/*
 * Common write path running under the zvol taskq context. This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error = 0;
    dmu_tx_t *tx;
    rl_t *rl;

    if (req->cmd_flags & VDEV_REQ_FLUSH)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

    /*
     * Some requests are just for flush and nothing else.
     */
    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

    tx = dmu_tx_create(zv->zv_objset);
    dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

    /* This will only fail for ENOSPC */
    error = dmu_tx_assign(tx, TXG_WAIT);
    if (error) {
        dmu_tx_abort(tx);
        zfs_range_unlock(rl);
        goto out;
    }

    error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
    if (error == 0)
        zvol_log_write(zv, tx, offset, size,
            req->cmd_flags & VDEV_REQ_FUA);

    dmu_tx_commit(tx);
    zfs_range_unlock(rl);

    if ((req->cmd_flags & VDEV_REQ_FUA) ||
        zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
        zil_commit(zv->zv_zilog, ZVOL_OBJ);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}

#ifdef HAVE_BLK_QUEUE_DISCARD
static void
zvol_discard(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t start = blk_rq_pos(req) << 9;
    uint64_t end = start + blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (end > zv->zv_volsize) {
        error = EIO;
        goto out;
    }

    /*
     * Align the request to volume block boundaries. If we don't,
     * then this will force dnode_free_range() to zero out the
     * unaligned parts, which is slow (read-modify-write) and
     * useless since we are not freeing any space by doing so.
     */
    start = P2ROUNDUP(start, zv->zv_volblocksize);
    end = P2ALIGN(end, zv->zv_volblocksize);

    if (start >= end) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

    error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end-start);

    /*
     * TODO: maybe we should add the operation to the log.
     */

    zfs_range_unlock(rl);
out:
    blk_end_request(req, -error, blk_rq_bytes(req));
    spl_fstrans_unmark(cookie);
}
#endif /* HAVE_BLK_QUEUE_DISCARD */

/*
 * Common read path running under the zvol taskq context. This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure. It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
    struct request *req = (struct request *)arg;
    struct request_queue *q = req->q;
    zvol_state_t *zv = q->queuedata;
    fstrans_cookie_t cookie = spl_fstrans_mark();
    uint64_t offset = blk_rq_pos(req) << 9;
    uint64_t size = blk_rq_bytes(req);
    int error;
    rl_t *rl;

    if (size == 0) {
        error = 0;
        goto out;
    }

    rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

    zfs_range_unlock(rl);

    /* convert checksum errors into IO errors */
    if (error == ECKSUM)
        error = SET_ERROR(EIO);

out:
    blk_end_request(req, -error, size);
    spl_fstrans_unmark(cookie);
}

/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling.
 */
static inline void
zvol_dispatch(task_func_t func, struct request *req)
{
    if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
        blk_requeue_request(req->q, req);
}

/*
 * Common request path. Rather than registering a custom make_request()
 * function we use the generic Linux version. This is done because it allows
 * us to easily merge read requests which would otherwise be performed
 * synchronously by the DMU. This is less critical in the write case where
 * the DMU will perform the correct merging within a transaction group.
 * Using the generic make_request() also lets us leverage the fact that the
 * elevator will ensure correct ordering with regard to barrier IOs. On
 * the downside it means that in the write case we end up doing request
 * merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq. This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
    zvol_state_t *zv = q->queuedata;
    struct request *req;
    unsigned int size;

    while ((req = blk_fetch_request(q)) != NULL) {
        size = blk_rq_bytes(req);

        if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
            get_capacity(zv->zv_disk)) {
            printk(KERN_INFO
                "%s: bad access: block=%llu, count=%lu\n",
                req->rq_disk->disk_name,
                (long long unsigned)blk_rq_pos(req),
                (long unsigned)blk_rq_sectors(req));
            __blk_end_request(req, -EIO, size);
            continue;
        }

        if (!blk_fs_request(req)) {
            printk(KERN_INFO "%s: non-fs cmd\n",
                req->rq_disk->disk_name);
            __blk_end_request(req, -EIO, size);
            continue;
        }

        switch (rq_data_dir(req)) {
        case READ:
            zvol_dispatch(zvol_read, req);
            break;
        case WRITE:
            if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
                __blk_end_request(req, -EROFS, size);
                break;
            }

#ifdef HAVE_BLK_QUEUE_DISCARD
            if (req->cmd_flags & VDEV_REQ_DISCARD) {
                zvol_dispatch(zvol_discard, req);
                break;
            }
#endif /* HAVE_BLK_QUEUE_DISCARD */

            zvol_dispatch(zvol_write, req);
            break;
        default:
            printk(KERN_INFO "%s: unknown cmd: %d\n",
                req->rq_disk->disk_name, (int)rq_data_dir(req));
            __blk_end_request(req, -EIO, size);
            break;
        }
    }
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
    if (zgd->zgd_db)
        dmu_buf_rele(zgd->zgd_db, zgd);

    zfs_range_unlock(zgd->zgd_rl);

    if (error == 0 && zgd->zgd_bp)
        zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

    kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
    zvol_state_t *zv = arg;
    objset_t *os = zv->zv_objset;
    uint64_t object = ZVOL_OBJ;
    uint64_t offset = lr->lr_offset;
    uint64_t size = lr->lr_length;
    blkptr_t *bp = &lr->lr_blkptr;
    dmu_buf_t *db;
    zgd_t *zgd;
    int error;

    ASSERT(zio != NULL);
    ASSERT(size != 0);

    zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
    zgd->zgd_zilog = zv->zv_zilog;
    zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

    /*
     * Write records come in two flavors: immediate and indirect.
     * For small writes it's cheaper to store the data with the
     * log record (immediate); for large writes it's cheaper to
     * sync the data and get a pointer to it (indirect) so that
     * we don't have to write the data twice.
     */
    if (buf != NULL) { /* immediate write */
        error = dmu_read(os, object, offset, size, buf,
            DMU_READ_NO_PREFETCH);
    } else {
        size = zv->zv_volblocksize;
        offset = P2ALIGN_TYPED(offset, size, uint64_t);
        error = dmu_buf_hold(os, object, offset, zgd, &db,
            DMU_READ_NO_PREFETCH);
        if (error == 0) {
            blkptr_t *obp = dmu_buf_get_blkptr(db);
            if (obp) {
                ASSERT(BP_IS_HOLE(bp));
                *bp = *obp;
            }

            zgd->zgd_db = db;
            zgd->zgd_bp = &lr->lr_blkptr;

            ASSERT(db != NULL);
            ASSERT(db->db_offset == offset);
            ASSERT(db->db_size == size);

            error = dmu_sync(zio, lr->lr_common.lrc_txg,
                zvol_get_done, zgd);

            if (error == 0)
                return (0);
        }
    }

    zvol_get_done(zgd, error);

    return (SET_ERROR(error));
}

/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
    zvol_state_t *zv = NULL;

    ASSERT(MUTEX_HELD(&zvol_state_lock));
    ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
    for (zv = list_head(&zvol_state_list); zv != NULL;
        zv = list_next(&zvol_state_list, zv)) {
        if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
            break;
    }

    list_insert_before(&zvol_state_list, zv, zv_insert);
}

/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
    ASSERT(MUTEX_HELD(&zvol_state_lock));
    list_remove(&zvol_state_list, zv_remove);
}

static int
zvol_first_open(zvol_state_t *zv)
{
    objset_t *os;
    uint64_t volsize;
    int locked = 0;
    int error;
    uint64_t ro;

    /*
     * In all other cases the spa_namespace_lock is taken before the
     * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
     * function calls fops->open() with the bdev->bd_mutex lock held.
     *
     * To avoid a potential lock inversion deadlock we preemptively
     * try to take the spa_namespace_lock. Normally it will not
     * be contended and this is safe because spa_open_common() handles
     * the case where the caller already holds the spa_namespace_lock.
     *
     * When it is contended we risk a lock inversion if we were to
     * block waiting for the lock. Luckily, the __blkdev_get()
     * function allows us to return -ERESTARTSYS which will result in
     * bdev->bd_mutex being dropped, reacquired, and fops->open() being
     * called again. This process can be repeated safely until both
     * locks are acquired.
     */
    if (!mutex_owned(&spa_namespace_lock)) {
        locked = mutex_tryenter(&spa_namespace_lock);
        if (!locked)
            return (-SET_ERROR(ERESTARTSYS));
    }

    error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
    if (error)
        goto out_mutex;

    /* lie and say we're read-only */
    error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
    if (error)
        goto out_mutex;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        goto out_mutex;
    }

    zv->zv_objset = os;
    error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
    if (error) {
        dmu_objset_disown(os, zvol_tag);
        goto out_mutex;
    }

    set_capacity(zv->zv_disk, volsize >> 9);
    zv->zv_volsize = volsize;
    zv->zv_zilog = zil_open(os, zvol_get_data);

    if (ro || dmu_objset_is_snapshot(os) ||
        !spa_writeable(dmu_objset_spa(os))) {
        set_disk_ro(zv->zv_disk, 1);
        zv->zv_flags |= ZVOL_RDONLY;
    } else {
        set_disk_ro(zv->zv_disk, 0);
        zv->zv_flags &= ~ZVOL_RDONLY;
    }

out_mutex:
    if (locked)
        mutex_exit(&spa_namespace_lock);

    return (SET_ERROR(-error));
}

static void
zvol_last_close(zvol_state_t *zv)
{
    zil_close(zv->zv_zilog);
    zv->zv_zilog = NULL;

    dmu_buf_rele(zv->zv_dbuf, zvol_tag);
    zv->zv_dbuf = NULL;

    /*
     * Evict cached data
     */
    if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
        !(zv->zv_flags & ZVOL_RDONLY))
        txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
    (void) dmu_objset_evict_dbufs(zv->zv_objset);

    dmu_objset_disown(zv->zv_objset, zvol_tag);
    zv->zv_objset = NULL;
}

static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    int error = 0, drop_mutex = 0;

    /*
     * If the caller is already holding the mutex do not take it
     * again, this will happen as part of zvol_create_minor().
     * Once add_disk() is called the device is live and the kernel
     * will attempt to open it to read the partition information.
     */
    if (!mutex_owned(&zvol_state_lock)) {
        mutex_enter(&zvol_state_lock);
        drop_mutex = 1;
    }

    ASSERT3P(zv, !=, NULL);

    if (zv->zv_open_count == 0) {
        error = zvol_first_open(zv);
        if (error)
            goto out_mutex;
    }

    if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
        error = -EROFS;
        goto out_open_count;
    }

    zv->zv_open_count++;

out_open_count:
    if (zv->zv_open_count == 0)
        zvol_last_close(zv);

out_mutex:
    if (drop_mutex)
        mutex_exit(&zvol_state_lock);

    check_disk_change(bdev);

    return (SET_ERROR(error));
}

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
#else
static int
#endif
zvol_release(struct gendisk *disk, fmode_t mode)
{
    zvol_state_t *zv = disk->private_data;
    int drop_mutex = 0;

    if (!mutex_owned(&zvol_state_lock)) {
        mutex_enter(&zvol_state_lock);
        drop_mutex = 1;
    }

    if (zv->zv_open_count > 0) {
        zv->zv_open_count--;
        if (zv->zv_open_count == 0)
            zvol_last_close(zv);
    }

    if (drop_mutex)
        mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
    return (0);
#endif
}

static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    int error = 0;

    if (zv == NULL)
        return (SET_ERROR(-ENXIO));

    switch (cmd) {
    case BLKFLSBUF:
        zil_commit(zv->zv_zilog, ZVOL_OBJ);
        break;
    case BLKZNAME:
        error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
        break;
    default:
        error = -ENOTTY;
        break;
    }

    return (SET_ERROR(error));
}
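
/*
 * Illustrative only (not built into the module): a user-space sketch of
 * the BLKZNAME ioctl above, modeled on the zvol_id udev helper. It assumes
 * BLKZNAME and MAXNAMELEN come from the installed zvol headers, and
 * /dev/zd0 is a hypothetical zvol node.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
    char name[MAXNAMELEN];
    int fd = open("/dev/zd0", O_RDONLY);

    if (fd < 0 || ioctl(fd, BLKZNAME, name) < 0) {
        perror("BLKZNAME");
        return (1);
    }
    printf("/dev/zd0 is dataset %s\n", name);
    (void) close(fd);
    return (0);
}
#endif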

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
    return (zvol_ioctl(bdev, mode, cmd, arg));
}
#else
#define zvol_compat_ioctl NULL
#endif

static int
zvol_media_changed(struct gendisk *disk)
{
    zvol_state_t *zv = disk->private_data;

    return (zv->zv_changed);
}

static int
zvol_revalidate_disk(struct gendisk *disk)
{
    zvol_state_t *zv = disk->private_data;

    zv->zv_changed = 0;
    set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

    return (0);
}

/*
 * Provide a simple virtual geometry for legacy compatibility. For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
    zvol_state_t *zv = bdev->bd_disk->private_data;
    sector_t sectors = get_capacity(zv->zv_disk);

    if (sectors > 2048) {
        geo->heads = 16;
        geo->sectors = 63;
    } else {
        geo->heads = 2;
        geo->sectors = 4;
    }

    geo->start = 0;
    geo->cylinders = sectors / (geo->heads * geo->sectors);

    return (0);
}
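
/*
 * Illustrative only (not built into the module): a user-space sketch that
 * queries the virtual geometry above through the standard HDIO_GETGEO
 * ioctl. /dev/zd0 is a hypothetical zvol node.
 */
#if 0
#include <linux/hdreg.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
    struct hd_geometry geo;
    int fd = open("/dev/zd0", O_RDONLY);

    if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0) {
        perror("HDIO_GETGEO");
        return (1);
    }
    printf("heads=%u sectors=%u cylinders=%u\n",
        geo.heads, geo.sectors, geo.cylinders);
    return (0);
}
#endif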

static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
    zvol_state_t *zv;
    struct kobject *kobj;

    mutex_enter(&zvol_state_lock);
    zv = zvol_find_by_dev(dev);
    kobj = zv ? get_disk(zv->zv_disk) : NULL;
    mutex_exit(&zvol_state_lock);

    return (kobj);
}

#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
    .open = zvol_open,
    .release = zvol_release,
    .ioctl = zvol_ioctl,
    .compat_ioctl = zvol_compat_ioctl,
    .media_changed = zvol_media_changed,
    .revalidate_disk = zvol_revalidate_disk,
    .getgeo = zvol_getgeo,
    .owner = THIS_MODULE,
};

#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
    return (zvol_open(inode->i_bdev, file->f_mode));
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
    return (zvol_release(inode->i_bdev->bd_disk, file->f_mode));
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
    if (file == NULL || inode == NULL)
        return (SET_ERROR(-EINVAL));

    return (zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg));
}

#ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
    if (file == NULL)
        return (SET_ERROR(-EINVAL));

    return (zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
        file->f_mode, cmd, arg));
}
#else
#define zvol_compat_ioctl_by_inode NULL
#endif

static struct block_device_operations zvol_ops = {
    .open = zvol_open_by_inode,
    .release = zvol_release_by_inode,
    .ioctl = zvol_ioctl_by_inode,
    .compat_ioctl = zvol_compat_ioctl_by_inode,
    .media_changed = zvol_media_changed,
    .revalidate_disk = zvol_revalidate_disk,
    .getgeo = zvol_getgeo,
    .owner = THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

/*
 * Allocate memory for a new zvol_state_t and set up the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
    zvol_state_t *zv;
    int error = 0;

    zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);

    spin_lock_init(&zv->zv_lock);
    list_link_init(&zv->zv_next);

    zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
    if (zv->zv_queue == NULL)
        goto out_kmem;

#ifdef HAVE_ELEVATOR_CHANGE
    error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
    if (error) {
        printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
            "noop", name, error);
        goto out_queue;
    }

#ifdef HAVE_BLK_QUEUE_FLUSH
    blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
    blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

    zv->zv_disk = alloc_disk(ZVOL_MINORS);
    if (zv->zv_disk == NULL)
        goto out_queue;

    zv->zv_queue->queuedata = zv;
    zv->zv_dev = dev;
    zv->zv_open_count = 0;
    strlcpy(zv->zv_name, name, MAXNAMELEN);

    mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
    avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
        sizeof (rl_t), offsetof(rl_t, r_node));
    zv->zv_znode.z_is_zvol = TRUE;

    zv->zv_disk->major = zvol_major;
    zv->zv_disk->first_minor = (dev & MINORMASK);
    zv->zv_disk->fops = &zvol_ops;
    zv->zv_disk->private_data = zv;
    zv->zv_disk->queue = zv->zv_queue;
    snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
        ZVOL_DEV_NAME, (dev & MINORMASK));

    return (zv);

out_queue:
    blk_cleanup_queue(zv->zv_queue);
out_kmem:
    kmem_free(zv, sizeof (zvol_state_t));

    return (NULL);
}

/*
 * Clean up, then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
    avl_destroy(&zv->zv_znode.z_range_avl);
    mutex_destroy(&zv->zv_znode.z_range_lock);

    del_gendisk(zv->zv_disk);
    blk_cleanup_queue(zv->zv_queue);
    put_disk(zv->zv_disk);

    kmem_free(zv, sizeof (zvol_state_t));
}

static int
__zvol_snapdev_hidden(const char *name)
{
    uint64_t snapdev;
    char *parent;
    char *atp;
    int error = 0;

    parent = kmem_alloc(MAXPATHLEN, KM_SLEEP);
    (void) strlcpy(parent, name, MAXPATHLEN);

    if ((atp = strrchr(parent, '@')) != NULL) {
        *atp = '\0';
        error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
        if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
            error = SET_ERROR(ENODEV);
    }

    kmem_free(parent, MAXPATHLEN);

    return (SET_ERROR(error));
}

static int
__zvol_create_minor(const char *name, boolean_t ignore_snapdev)
{
    zvol_state_t *zv;
    objset_t *os;
    dmu_object_info_t *doi;
    uint64_t volsize;
    unsigned minor = 0;
    int error = 0;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    zv = zvol_find_by_name(name);
    if (zv) {
        error = SET_ERROR(EEXIST);
        goto out;
    }

    if (ignore_snapdev == B_FALSE) {
        error = __zvol_snapdev_hidden(name);
        if (error)
            goto out;
    }

    doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

    error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
    if (error)
        goto out_doi;

    error = dmu_object_info(os, ZVOL_OBJ, doi);
    if (error)
        goto out_dmu_objset_disown;

    error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
    if (error)
        goto out_dmu_objset_disown;

    error = zvol_find_minor(&minor);
    if (error)
        goto out_dmu_objset_disown;

    zv = zvol_alloc(MKDEV(zvol_major, minor), name);
    if (zv == NULL) {
        error = SET_ERROR(EAGAIN);
        goto out_dmu_objset_disown;
    }

    if (dmu_objset_is_snapshot(os))
        zv->zv_flags |= ZVOL_RDONLY;

    zv->zv_volblocksize = doi->doi_data_block_size;
    zv->zv_volsize = volsize;
    zv->zv_objset = os;

    set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

    blk_queue_max_hw_sectors(zv->zv_queue, DMU_MAX_ACCESS / 512);
    blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
    blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
    blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
    blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
    blk_queue_max_discard_sectors(zv->zv_queue,
        (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
    blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
    queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
    queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

    if (spa_writeable(dmu_objset_spa(os))) {
        if (zil_replay_disable)
            zil_destroy(dmu_objset_zil(os), B_FALSE);
        else
            zil_replay(os, zv, zvol_replay_vector);
    }

    zv->zv_objset = NULL;
out_dmu_objset_disown:
    dmu_objset_disown(os, zvol_tag);
out_doi:
    kmem_free(doi, sizeof (dmu_object_info_t));
out:

    if (error == 0) {
        zvol_insert(zv);
        add_disk(zv->zv_disk);
    }

    return (SET_ERROR(error));
}

/*
 * Create a block device minor node and set up the linkage between it
 * and the specified volume. Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_create_minor(const char *name)
{
    int error;

    mutex_enter(&zvol_state_lock);
    error = __zvol_create_minor(name, B_FALSE);
    mutex_exit(&zvol_state_lock);

    return (SET_ERROR(error));
}

static int
__zvol_remove_minor(const char *name)
{
    zvol_state_t *zv;

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    zv = zvol_find_by_name(name);
    if (zv == NULL)
        return (SET_ERROR(ENXIO));

    if (zv->zv_open_count > 0)
        return (SET_ERROR(EBUSY));

    zvol_remove(zv);
    zvol_free(zv);

    return (0);
}

/*
 * Remove a block device minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
    int error;

    mutex_enter(&zvol_state_lock);
    error = __zvol_remove_minor(name);
    mutex_exit(&zvol_state_lock);

    return (SET_ERROR(error));
}

/*
 * Rename a block device minor node for the specified volume.
 */
static void
__zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
    int readonly = get_disk_ro(zv->zv_disk);

    ASSERT(MUTEX_HELD(&zvol_state_lock));

    strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

    /*
     * The block device's read-only state is briefly changed causing
     * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
     * the name change and fixes the symlinks. This does not change
     * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
     * changes. This would normally be done using kobject_uevent() but
     * that is a GPL-only symbol which is why we need this workaround.
     */
    set_disk_ro(zv->zv_disk, !readonly);
    set_disk_ro(zv->zv_disk, readonly);
}

static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
    (void) zvol_create_minor(dsname);

    return (0);
}

/*
 * Create minors for specified dataset including children and snapshots.
 */
int
zvol_create_minors(const char *name)
{
    int error = 0;

    if (!zvol_inhibit_dev)
        error = dmu_objset_find((char *)name, zvol_create_minors_cb,
            NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

    return (SET_ERROR(error));
}

/*
 * Remove minors for specified dataset including children and snapshots.
 */
void
zvol_remove_minors(const char *name)
{
    zvol_state_t *zv, *zv_next;
    int namelen = ((name) ? strlen(name) : 0);

    if (zvol_inhibit_dev)
        return;

    mutex_enter(&zvol_state_lock);

    for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
        zv_next = list_next(&zvol_state_list, zv);

        if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
            (strncmp(zv->zv_name, name, namelen) == 0 &&
            zv->zv_name[namelen] == '/')) {
            zvol_remove(zv);
            zvol_free(zv);
        }
    }

    mutex_exit(&zvol_state_lock);
}

/*
 * Rename minors for specified dataset including children and snapshots.
 */
void
zvol_rename_minors(const char *oldname, const char *newname)
{
    zvol_state_t *zv, *zv_next;
    int oldnamelen, newnamelen;
    char *name;

    if (zvol_inhibit_dev)
        return;

    oldnamelen = strlen(oldname);
    newnamelen = strlen(newname);
    name = kmem_alloc(MAXNAMELEN, KM_SLEEP);

    mutex_enter(&zvol_state_lock);

    for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
        zv_next = list_next(&zvol_state_list, zv);

        if (strcmp(zv->zv_name, oldname) == 0) {
            __zvol_rename_minor(zv, newname);
        } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
            (zv->zv_name[oldnamelen] == '/' ||
            zv->zv_name[oldnamelen] == '@')) {
            snprintf(name, MAXNAMELEN, "%s%c%s", newname,
                zv->zv_name[oldnamelen],
                zv->zv_name + oldnamelen + 1);
            __zvol_rename_minor(zv, name);
        }
    }

    mutex_exit(&zvol_state_lock);

    kmem_free(name, MAXNAMELEN);
}

static int
snapdev_snapshot_changed_cb(const char *dsname, void *arg)
{
    uint64_t snapdev = *(uint64_t *)arg;

    if (strchr(dsname, '@') == NULL)
        return (0);

    switch (snapdev) {
    case ZFS_SNAPDEV_VISIBLE:
        mutex_enter(&zvol_state_lock);
        (void) __zvol_create_minor(dsname, B_TRUE);
        mutex_exit(&zvol_state_lock);
        break;
    case ZFS_SNAPDEV_HIDDEN:
        (void) zvol_remove_minor(dsname);
        break;
    }

    return (0);
}

int
zvol_set_snapdev(const char *dsname, uint64_t snapdev)
{
    (void) dmu_objset_find((char *)dsname, snapdev_snapshot_changed_cb,
        &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
    /* caller should continue to modify snapdev property */
    return (-1);
}

int
zvol_init(void)
{
    int error;

    list_create(&zvol_state_list, sizeof (zvol_state_t),
        offsetof(zvol_state_t, zv_next));

    mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

    zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
        zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
    if (zvol_taskq == NULL) {
        printk(KERN_INFO "ZFS: taskq_create() failed\n");
        error = -ENOMEM;
        goto out1;
    }

    error = register_blkdev(zvol_major, ZVOL_DRIVER);
    if (error) {
        printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
        goto out2;
    }

    blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
        THIS_MODULE, zvol_probe, NULL, NULL);

    return (0);

out2:
    taskq_destroy(zvol_taskq);
out1:
    mutex_destroy(&zvol_state_lock);
    list_destroy(&zvol_state_list);

    return (SET_ERROR(error));
}

void
zvol_fini(void)
{
    zvol_remove_minors(NULL);
    blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
    unregister_blkdev(zvol_major, ZVOL_DRIVER);
    taskq_destroy(zvol_taskq);
    mutex_destroy(&zvol_state_lock);
    list_destroy(&zvol_state_list);
}

module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard");
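
/*
 * Usage note (illustrative): zvol.c is built into the zfs module, so these
 * parameters are set at module load time, e.g.
 *
 *   modprobe zfs zvol_major=230 zvol_threads=8
 *
 * zvol_inhibit_dev (mode 0644) can also be changed at runtime through
 * /sys/module/zfs/parameters/zvol_inhibit_dev; the 0444 parameters are
 * read-only once loaded.
 */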