1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 *
27 * ZFS volume emulation driver.
28 *
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
31 *
32 * /dev/<pool_name>/<dataset_name>
33 *
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
36 */
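/*
 * Illustrative example (not part of the original file): a volume created
 * with "zfs create -V 10G tank/vol" is exposed by this driver as a
 * /dev/zd<N> block device, and udev typically adds the /dev/tank/vol
 * symbolic link described above.
 */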
37
38 #include <sys/dbuf.h>
39 #include <sys/dmu_traverse.h>
40 #include <sys/dsl_dataset.h>
41 #include <sys/dsl_prop.h>
42 #include <sys/zap.h>
43 #include <sys/zil_impl.h>
44 #include <sys/zio.h>
45 #include <sys/zfs_rlock.h>
46 #include <sys/zfs_znode.h>
47 #include <sys/zvol.h>
48 #include <linux/blkdev_compat.h>
49
50 unsigned int zvol_inhibit_dev = 0;
51 unsigned int zvol_major = ZVOL_MAJOR;
52 unsigned int zvol_threads = 32;
53 unsigned long zvol_max_discard_blocks = 16384;
54
55 static taskq_t *zvol_taskq;
56 static kmutex_t zvol_state_lock;
57 static list_t zvol_state_list;
58 static char *zvol_tag = "zvol_tag";
59
60 /*
61 * The in-core state of each volume.
62 */
63 typedef struct zvol_state {
64 char zv_name[MAXNAMELEN]; /* name */
65 uint64_t zv_volsize; /* advertised space */
66 uint64_t zv_volblocksize;/* volume block size */
67 objset_t *zv_objset; /* objset handle */
68 uint32_t zv_flags; /* ZVOL_* flags */
69 uint32_t zv_open_count; /* open counts */
70 uint32_t zv_changed; /* disk changed */
71 zilog_t *zv_zilog; /* ZIL handle */
72 znode_t zv_znode; /* for range locking */
73 dmu_buf_t *zv_dbuf; /* bonus handle */
74 dev_t zv_dev; /* device id */
75 struct gendisk *zv_disk; /* generic disk */
76 struct request_queue *zv_queue; /* request queue */
77 spinlock_t zv_lock; /* request queue lock */
78 list_node_t zv_next; /* next zvol_state_t linkage */
79 } zvol_state_t;
80
81 #define ZVOL_RDONLY 0x1
82
83 /*
84 * Find the next available range of ZVOL_MINORS minor numbers. The
85 * zvol_state_list is kept in ascending minor order so we simply need
86 * to scan the list for the first gap in the sequence. This allows us
87 * to recycle minor numbers as devices are created and removed.
88 */
89 static int
90 zvol_find_minor(unsigned *minor)
91 {
92 zvol_state_t *zv;
93
94 *minor = 0;
95 ASSERT(MUTEX_HELD(&zvol_state_lock));
96 for (zv = list_head(&zvol_state_list); zv != NULL;
97 zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
98 if (MINOR(zv->zv_dev) != MINOR(*minor))
99 break;
100 }
101
102 /* All minors are in use */
103 if (*minor >= (1 << MINORBITS))
104 return ENXIO;
105
106 return 0;
107 }
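/*
 * Worked example (illustrative, assuming ZVOL_MINORS is 16): with existing
 * devices at minors 0 and 32, the scan above stops at the second entry
 * because 16 != 32 and returns *minor == 16, reusing the gap left by a
 * previously removed device.  Only when the minor space is consecutively
 * occupied all the way up to (1 << MINORBITS) is ENXIO returned.
 */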
108
109 /*
110 * Find a zvol_state_t given the full major+minor dev_t.
111 */
112 static zvol_state_t *
113 zvol_find_by_dev(dev_t dev)
114 {
115 zvol_state_t *zv;
116
117 ASSERT(MUTEX_HELD(&zvol_state_lock));
118 for (zv = list_head(&zvol_state_list); zv != NULL;
119 zv = list_next(&zvol_state_list, zv)) {
120 if (zv->zv_dev == dev)
121 return zv;
122 }
123
124 return NULL;
125 }
126
127 /*
128 * Find a zvol_state_t given the name provided at zvol_alloc() time.
129 */
130 static zvol_state_t *
131 zvol_find_by_name(const char *name)
132 {
133 zvol_state_t *zv;
134
135 ASSERT(MUTEX_HELD(&zvol_state_lock));
136 for (zv = list_head(&zvol_state_list); zv != NULL;
137 zv = list_next(&zvol_state_list, zv)) {
138 if (!strncmp(zv->zv_name, name, MAXNAMELEN))
139 return zv;
140 }
141
142 return NULL;
143 }
144
145
146 /*
147 * Given a path, return TRUE if path is a ZVOL.
148 */
149 boolean_t
150 zvol_is_zvol(const char *device)
151 {
152 struct block_device *bdev;
153 unsigned int major;
154
155 bdev = lookup_bdev(device);
156 if (IS_ERR(bdev))
157 return (B_FALSE);
158
159 major = MAJOR(bdev->bd_dev);
160 bdput(bdev);
161
162 if (major == zvol_major)
163 return (B_TRUE);
164
165 return (B_FALSE);
166 }
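/*
 * Usage sketch (illustrative): zvol_is_zvol("/dev/zd0") resolves the path
 * with lookup_bdev() and returns B_TRUE when the device's major number
 * matches zvol_major; a path such as "/dev/sda" returns B_FALSE.
 */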
167
168 /*
169 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
170 */
171 void
172 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
173 {
174 zfs_creat_t *zct = arg;
175 nvlist_t *nvprops = zct->zct_props;
176 int error;
177 uint64_t volblocksize, volsize;
178
179 VERIFY(nvlist_lookup_uint64(nvprops,
180 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
181 if (nvlist_lookup_uint64(nvprops,
182 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
183 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
184
185 /*
186 * These properties must be removed from the list so the generic
187 * property setting step won't apply to them.
188 */
189 VERIFY(nvlist_remove_all(nvprops,
190 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
191 (void) nvlist_remove_all(nvprops,
192 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
193
194 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
195 DMU_OT_NONE, 0, tx);
196 ASSERT(error == 0);
197
198 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
199 DMU_OT_NONE, 0, tx);
200 ASSERT(error == 0);
201
202 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
203 ASSERT(error == 0);
204 }
205
206 /*
207 * ZFS_IOC_OBJSET_STATS entry point.
208 */
209 int
210 zvol_get_stats(objset_t *os, nvlist_t *nv)
211 {
212 int error;
213 dmu_object_info_t *doi;
214 uint64_t val;
215
216 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
217 if (error)
218 return (error);
219
220 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
221 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
222 error = dmu_object_info(os, ZVOL_OBJ, doi);
223
224 if (error == 0) {
225 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
226 doi->doi_data_block_size);
227 }
228
229 kmem_free(doi, sizeof(dmu_object_info_t));
230
231 return (error);
232 }
233
234 /*
235 * Sanity check volume size.
236 */
237 int
238 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
239 {
240 if (volsize == 0)
241 return (SET_ERROR(EINVAL));
242
243 if (volsize % blocksize != 0)
244 return (SET_ERROR(EINVAL));
245
246 #ifdef _ILP32
247 if (volsize - 1 > MAXOFFSET_T)
248 return (SET_ERROR(EOVERFLOW));
249 #endif
250 return (0);
251 }
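/*
 * Example (illustrative): with an 8 KiB volblocksize, a 1 GiB volsize
 * (1073741824 bytes) passes because it is a multiple of the block size,
 * while 1073745920 (1 GiB + 4 KiB) fails with EINVAL.  A volsize of 0 is
 * always rejected.
 */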
252
253 /*
254 * Ensure the zap is flushed then inform the VFS of the capacity change.
255 */
256 static int
257 zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
258 {
259 struct block_device *bdev;
260 dmu_tx_t *tx;
261 int error;
262
263 ASSERT(MUTEX_HELD(&zvol_state_lock));
264
265 tx = dmu_tx_create(os);
266 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
267 error = dmu_tx_assign(tx, TXG_WAIT);
268 if (error) {
269 dmu_tx_abort(tx);
270 return (error);
271 }
272
273 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
274 &volsize, tx);
275 dmu_tx_commit(tx);
276
277 if (error)
278 return (error);
279
280 error = dmu_free_long_range(os,
281 ZVOL_OBJ, volsize, DMU_OBJECT_END);
282 if (error)
283 return (error);
284
285 bdev = bdget_disk(zv->zv_disk, 0);
286 if (!bdev)
287 return (SET_ERROR(EIO));
288 /*
289 * 2.6.28 API change
290 * Added check_disk_size_change() helper function.
291 */
292 #ifdef HAVE_CHECK_DISK_SIZE_CHANGE
293 set_capacity(zv->zv_disk, volsize >> 9);
294 zv->zv_volsize = volsize;
295 check_disk_size_change(zv->zv_disk, bdev);
296 #else
297 zv->zv_volsize = volsize;
298 zv->zv_changed = 1;
299 (void) check_disk_change(bdev);
300 #endif /* HAVE_CHECK_DISK_SIZE_CHANGE */
301
302 bdput(bdev);
303
304 return (0);
305 }
306
307 /*
308 * ZFS_PROP_VOLSIZE set entry point.
309 */
310 int
311 zvol_set_volsize(const char *name, uint64_t volsize)
312 {
313 zvol_state_t *zv;
314 dmu_object_info_t *doi;
315 objset_t *os = NULL;
316 uint64_t readonly;
317 int error;
318
319 error = dsl_prop_get_integer(name,
320 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
321 if (error != 0)
322 return (error);
323 if (readonly)
324 return (SET_ERROR(EROFS));
325
326 mutex_enter(&zvol_state_lock);
327
328 zv = zvol_find_by_name(name);
329 if (zv == NULL) {
330 error = SET_ERROR(ENXIO);
331 goto out;
332 }
333
334 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
335
336 error = dmu_objset_hold(name, FTAG, &os);
337 if (error)
338 goto out_doi;
339
340 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
341 (error = zvol_check_volsize(volsize, doi->doi_data_block_size)) != 0)
342 goto out_doi;
343
344 VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
345 if (readonly) {
346 error = SET_ERROR(EROFS);
347 goto out_doi;
348 }
349
350 if (zv->zv_flags & ZVOL_RDONLY) {
351 error = SET_ERROR(EROFS);
352 goto out_doi;
353 }
354
355 error = zvol_update_volsize(zv, volsize, os);
356 out_doi:
357 kmem_free(doi, sizeof(dmu_object_info_t));
358 out:
359 if (os)
360 dmu_objset_rele(os, FTAG);
361
362 mutex_exit(&zvol_state_lock);
363
364 return (error);
365 }
366
367 /*
368 * Sanity check volume block size.
369 */
370 int
371 zvol_check_volblocksize(uint64_t volblocksize)
372 {
373 if (volblocksize < SPA_MINBLOCKSIZE ||
374 volblocksize > SPA_MAXBLOCKSIZE ||
375 !ISP2(volblocksize))
376 return (SET_ERROR(EDOM));
377
378 return (0);
379 }
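/*
 * Example (illustrative, assuming SPA_MINBLOCKSIZE is 512 and
 * SPA_MAXBLOCKSIZE is 128 KiB): 512, 4096 and 131072 are accepted; 3000
 * fails with EDOM because it is not a power of two, and 256 fails because
 * it is below the minimum block size.
 */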
380
381 /*
382 * ZFS_PROP_VOLBLOCKSIZE set entry point.
383 */
384 int
385 zvol_set_volblocksize(const char *name, uint64_t volblocksize)
386 {
387 zvol_state_t *zv;
388 dmu_tx_t *tx;
389 int error;
390
391 mutex_enter(&zvol_state_lock);
392
393 zv = zvol_find_by_name(name);
394 if (zv == NULL) {
395 error = SET_ERROR(ENXIO);
396 goto out;
397 }
398
399 if (zv->zv_flags & ZVOL_RDONLY) {
400 error = SET_ERROR(EROFS);
401 goto out;
402 }
403
404 tx = dmu_tx_create(zv->zv_objset);
405 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
406 error = dmu_tx_assign(tx, TXG_WAIT);
407 if (error) {
408 dmu_tx_abort(tx);
409 } else {
410 error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
411 volblocksize, 0, tx);
412 if (error == ENOTSUP)
413 error = SET_ERROR(EBUSY);
414 dmu_tx_commit(tx);
415 if (error == 0)
416 zv->zv_volblocksize = volblocksize;
417 }
418 out:
419 mutex_exit(&zvol_state_lock);
420
421 return (error);
422 }
423
424 /*
425 * Replay a TX_WRITE ZIL transaction that didn't get committed
426 * after a system failure.
427 */
428 static int
429 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
430 {
431 objset_t *os = zv->zv_objset;
432 char *data = (char *)(lr + 1); /* data follows lr_write_t */
433 uint64_t off = lr->lr_offset;
434 uint64_t len = lr->lr_length;
435 dmu_tx_t *tx;
436 int error;
437
438 if (byteswap)
439 byteswap_uint64_array(lr, sizeof (*lr));
440
441 tx = dmu_tx_create(os);
442 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
443 error = dmu_tx_assign(tx, TXG_WAIT);
444 if (error) {
445 dmu_tx_abort(tx);
446 } else {
447 dmu_write(os, ZVOL_OBJ, off, len, data, tx);
448 dmu_tx_commit(tx);
449 }
450
451 return (error);
452 }
453
454 static int
455 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
456 {
457 return (SET_ERROR(ENOTSUP));
458 }
459
460 /*
461 * Callback vectors for replaying records.
462 * Only TX_WRITE is needed for zvol.
463 */
464 zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
465 (zil_replay_func_t)zvol_replay_err, /* no such transaction type */
466 (zil_replay_func_t)zvol_replay_err, /* TX_CREATE */
467 (zil_replay_func_t)zvol_replay_err, /* TX_MKDIR */
468 (zil_replay_func_t)zvol_replay_err, /* TX_MKXATTR */
469 (zil_replay_func_t)zvol_replay_err, /* TX_SYMLINK */
470 (zil_replay_func_t)zvol_replay_err, /* TX_REMOVE */
471 (zil_replay_func_t)zvol_replay_err, /* TX_RMDIR */
472 (zil_replay_func_t)zvol_replay_err, /* TX_LINK */
473 (zil_replay_func_t)zvol_replay_err, /* TX_RENAME */
474 (zil_replay_func_t)zvol_replay_write, /* TX_WRITE */
475 (zil_replay_func_t)zvol_replay_err, /* TX_TRUNCATE */
476 (zil_replay_func_t)zvol_replay_err, /* TX_SETATTR */
477 (zil_replay_func_t)zvol_replay_err, /* TX_ACL */
478 };
479
480 /*
481 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
482 *
483 * We store data in the log buffers if it's small enough.
484 * Otherwise we will later flush the data out via dmu_sync().
485 */
486 ssize_t zvol_immediate_write_sz = 32768;
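/*
 * Write record selection, for illustration: a full, block-aligned write to a
 * volume whose volblocksize exceeds the immediate-write threshold (and with
 * no separate log device) is logged as WR_INDIRECT and flushed later via
 * dmu_sync(); smaller synchronous writes are copied into the log record
 * itself (WR_COPIED, up to ZIL_MAX_LOG_DATA per record); everything else is
 * logged as WR_NEED_COPY and read back from the DMU only if the itx is
 * actually committed.
 */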
487
488 static void
489 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
490 uint64_t offset, uint64_t size, int sync)
491 {
492 uint32_t blocksize = zv->zv_volblocksize;
493 zilog_t *zilog = zv->zv_zilog;
494 boolean_t slogging;
495 ssize_t immediate_write_sz;
496
497 if (zil_replaying(zilog, tx))
498 return;
499
500 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
501 ? 0 : zvol_immediate_write_sz;
502 slogging = spa_has_slogs(zilog->zl_spa) &&
503 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
504
505 while (size) {
506 itx_t *itx;
507 lr_write_t *lr;
508 ssize_t len;
509 itx_wr_state_t write_state;
510
511 /*
512 * Unlike zfs_log_write() we can be called with
513 * up to DMU_MAX_ACCESS/2 (5MB) writes.
514 */
515 if (blocksize > immediate_write_sz && !slogging &&
516 size >= blocksize && offset % blocksize == 0) {
517 write_state = WR_INDIRECT; /* uses dmu_sync */
518 len = blocksize;
519 } else if (sync) {
520 write_state = WR_COPIED;
521 len = MIN(ZIL_MAX_LOG_DATA, size);
522 } else {
523 write_state = WR_NEED_COPY;
524 len = MIN(ZIL_MAX_LOG_DATA, size);
525 }
526
527 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
528 (write_state == WR_COPIED ? len : 0));
529 lr = (lr_write_t *)&itx->itx_lr;
530 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
531 ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
532 zil_itx_destroy(itx);
533 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
534 lr = (lr_write_t *)&itx->itx_lr;
535 write_state = WR_NEED_COPY;
536 }
537
538 itx->itx_wr_state = write_state;
539 if (write_state == WR_NEED_COPY)
540 itx->itx_sod += len;
541 lr->lr_foid = ZVOL_OBJ;
542 lr->lr_offset = offset;
543 lr->lr_length = len;
544 lr->lr_blkoff = 0;
545 BP_ZERO(&lr->lr_blkptr);
546
547 itx->itx_private = zv;
548 itx->itx_sync = sync;
549
550 (void) zil_itx_assign(zilog, itx, tx);
551
552 offset += len;
553 size -= len;
554 }
555 }
556
557 /*
558 * Common write path running under the zvol taskq context. This function
559 * is responsible for copying the request structure data into the DMU and
560 * signaling the request queue with the result of the copy.
561 */
562 static void
563 zvol_write(void *arg)
564 {
565 struct request *req = (struct request *)arg;
566 struct request_queue *q = req->q;
567 zvol_state_t *zv = q->queuedata;
568 uint64_t offset = blk_rq_pos(req) << 9;
569 uint64_t size = blk_rq_bytes(req);
570 int error = 0;
571 dmu_tx_t *tx;
572 rl_t *rl;
573
574 /*
575 * Annotate this call path with a flag that indicates that it is
576 * unsafe to use KM_SLEEP during memory allocations due to the
577 * potential for a deadlock. KM_PUSHPAGE should be used instead.
578 */
579 ASSERT(!(current->flags & PF_NOFS));
580 current->flags |= PF_NOFS;
581
582 if (req->cmd_flags & VDEV_REQ_FLUSH)
583 zil_commit(zv->zv_zilog, ZVOL_OBJ);
584
585 /*
586 * Some requests are just for flush and nothing else.
587 */
588 if (size == 0) {
589 blk_end_request(req, 0, size);
590 goto out;
591 }
592
593 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
594
595 tx = dmu_tx_create(zv->zv_objset);
596 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
597
598 /* This will only fail for ENOSPC */
599 error = dmu_tx_assign(tx, TXG_WAIT);
600 if (error) {
601 dmu_tx_abort(tx);
602 zfs_range_unlock(rl);
603 blk_end_request(req, -error, size);
604 goto out;
605 }
606
607 error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
608 if (error == 0)
609 zvol_log_write(zv, tx, offset, size,
610 req->cmd_flags & VDEV_REQ_FUA);
611
612 dmu_tx_commit(tx);
613 zfs_range_unlock(rl);
614
615 if ((req->cmd_flags & VDEV_REQ_FUA) ||
616 zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
617 zil_commit(zv->zv_zilog, ZVOL_OBJ);
618
619 blk_end_request(req, -error, size);
620 out:
621 current->flags &= ~PF_NOFS;
622 }
623
624 #ifdef HAVE_BLK_QUEUE_DISCARD
625 static void
626 zvol_discard(void *arg)
627 {
628 struct request *req = (struct request *)arg;
629 struct request_queue *q = req->q;
630 zvol_state_t *zv = q->queuedata;
631 uint64_t start = blk_rq_pos(req) << 9;
632 uint64_t end = start + blk_rq_bytes(req);
633 int error;
634 rl_t *rl;
635
636 /*
637 * Annotate this call path with a flag that indicates that it is
638 * unsafe to use KM_SLEEP during memory allocations due to the
639 * potential for a deadlock. KM_PUSHPAGE should be used instead.
640 */
641 ASSERT(!(current->flags & PF_NOFS));
642 current->flags |= PF_NOFS;
643
644 if (end > zv->zv_volsize) {
645 blk_end_request(req, -EIO, blk_rq_bytes(req));
646 goto out;
647 }
648
649 /*
650 * Align the request to volume block boundaries. If we don't,
651 * then this will force dnode_free_range() to zero out the
652 * unaligned parts, which is slow (read-modify-write) and
653 * useless since we are not freeing any space by doing so.
654 */
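/*
 * For example (illustrative): with an 8 KiB volblocksize, a discard
 * covering bytes 4096-20480 is trimmed to 8192-16384, so only the one
 * fully covered block is freed and the partial blocks are left untouched.
 */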
655 start = P2ROUNDUP(start, zv->zv_volblocksize);
656 end = P2ALIGN(end, zv->zv_volblocksize);
657
658 if (start >= end) {
659 blk_end_request(req, 0, blk_rq_bytes(req));
660 goto out;
661 }
662
663 rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);
664
665 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);
666
667 /*
668 * TODO: maybe we should add the operation to the log.
669 */
670
671 zfs_range_unlock(rl);
672
673 blk_end_request(req, -error, blk_rq_bytes(req));
674 out:
675 current->flags &= ~PF_NOFS;
676 }
677 #endif /* HAVE_BLK_QUEUE_DISCARD */
678
679 /*
680 * Common read path running under the zvol taskq context. This function
681 * is responsible for copying the requested data out of the DMU and into
682 * a Linux request structure. It must then signal the request queue with
683 * an error code describing the result of the copy.
684 */
685 static void
686 zvol_read(void *arg)
687 {
688 struct request *req = (struct request *)arg;
689 struct request_queue *q = req->q;
690 zvol_state_t *zv = q->queuedata;
691 uint64_t offset = blk_rq_pos(req) << 9;
692 uint64_t size = blk_rq_bytes(req);
693 int error;
694 rl_t *rl;
695
696 if (size == 0) {
697 blk_end_request(req, 0, size);
698 return;
699 }
700
701 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
702
703 error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);
704
705 zfs_range_unlock(rl);
706
707 /* convert checksum errors into IO errors */
708 if (error == ECKSUM)
709 error = SET_ERROR(EIO);
710
711 blk_end_request(req, -error, size);
712 }
713
714 /*
715 * Request will be added back to the request queue and retried if
716 * it cannot be immediately dispatched to the taskq for handling
717 */
718 static inline void
719 zvol_dispatch(task_func_t func, struct request *req)
720 {
721 if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
722 blk_requeue_request(req->q, req);
723 }
724
725 /*
726 * Common request path. Rather than registering a custom make_request()
727 * function we use the generic Linux version. This is done because it allows
728 * us to easily merge read requests which would otherwise be performed
729 * synchronously by the DMU. This is less critical in the write case where
730 * the DMU will perform the correct merging within a transaction group. Using
731 * the generic make_request() also lets us leverage the fact that the
732 * elevator will ensure correct ordering with regard to barrier IOs. On
733 * the downside it means that in the write case we end up doing request
734 * merging twice: once in the elevator and once in the DMU.
735 *
736 * The request handler is called under a spin lock so all the real work
737 * is handed off to be done in the context of the zvol taskq. This function
738 * simply performs basic request sanity checking and hands off the request.
739 */
740 static void
741 zvol_request(struct request_queue *q)
742 {
743 zvol_state_t *zv = q->queuedata;
744 struct request *req;
745 unsigned int size;
746
747 while ((req = blk_fetch_request(q)) != NULL) {
748 size = blk_rq_bytes(req);
749
750 if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
751 get_capacity(zv->zv_disk)) {
752 printk(KERN_INFO
753 "%s: bad access: block=%llu, count=%lu\n",
754 req->rq_disk->disk_name,
755 (long long unsigned)blk_rq_pos(req),
756 (long unsigned)blk_rq_sectors(req));
757 __blk_end_request(req, -EIO, size);
758 continue;
759 }
760
761 if (!blk_fs_request(req)) {
762 printk(KERN_INFO "%s: non-fs cmd\n",
763 req->rq_disk->disk_name);
764 __blk_end_request(req, -EIO, size);
765 continue;
766 }
767
768 switch (rq_data_dir(req)) {
769 case READ:
770 zvol_dispatch(zvol_read, req);
771 break;
772 case WRITE:
773 if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
774 __blk_end_request(req, -EROFS, size);
775 break;
776 }
777
778 #ifdef HAVE_BLK_QUEUE_DISCARD
779 if (req->cmd_flags & VDEV_REQ_DISCARD) {
780 zvol_dispatch(zvol_discard, req);
781 break;
782 }
783 #endif /* HAVE_BLK_QUEUE_DISCARD */
784
785 zvol_dispatch(zvol_write, req);
786 break;
787 default:
788 printk(KERN_INFO "%s: unknown cmd: %d\n",
789 req->rq_disk->disk_name, (int)rq_data_dir(req));
790 __blk_end_request(req, -EIO, size);
791 break;
792 }
793 }
794 }
795
796 static void
797 zvol_get_done(zgd_t *zgd, int error)
798 {
799 if (zgd->zgd_db)
800 dmu_buf_rele(zgd->zgd_db, zgd);
801
802 zfs_range_unlock(zgd->zgd_rl);
803
804 if (error == 0 && zgd->zgd_bp)
805 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
806
807 kmem_free(zgd, sizeof (zgd_t));
808 }
809
810 /*
811 * Get data to generate a TX_WRITE intent log record.
812 */
813 static int
814 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
815 {
816 zvol_state_t *zv = arg;
817 objset_t *os = zv->zv_objset;
818 uint64_t object = ZVOL_OBJ;
819 uint64_t offset = lr->lr_offset;
820 uint64_t size = lr->lr_length;
821 blkptr_t *bp = &lr->lr_blkptr;
822 dmu_buf_t *db;
823 zgd_t *zgd;
824 int error;
825
826 ASSERT(zio != NULL);
827 ASSERT(size != 0);
828
829 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
830 zgd->zgd_zilog = zv->zv_zilog;
831 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
832
833 /*
834 * Write records come in two flavors: immediate and indirect.
835 * For small writes it's cheaper to store the data with the
836 * log record (immediate); for large writes it's cheaper to
837 * sync the data and get a pointer to it (indirect) so that
838 * we don't have to write the data twice.
839 */
840 if (buf != NULL) { /* immediate write */
841 error = dmu_read(os, object, offset, size, buf,
842 DMU_READ_NO_PREFETCH);
843 } else {
844 size = zv->zv_volblocksize;
845 offset = P2ALIGN_TYPED(offset, size, uint64_t);
846 error = dmu_buf_hold(os, object, offset, zgd, &db,
847 DMU_READ_NO_PREFETCH);
848 if (error == 0) {
849 blkptr_t *obp = dmu_buf_get_blkptr(db);
850 if (obp) {
851 ASSERT(BP_IS_HOLE(bp));
852 *bp = *obp;
853 }
854
855 zgd->zgd_db = db;
856 zgd->zgd_bp = &lr->lr_blkptr;
857
858 ASSERT(db != NULL);
859 ASSERT(db->db_offset == offset);
860 ASSERT(db->db_size == size);
861
862 error = dmu_sync(zio, lr->lr_common.lrc_txg,
863 zvol_get_done, zgd);
864
865 if (error == 0)
866 return (0);
867 }
868 }
869
870 zvol_get_done(zgd, error);
871
872 return (error);
873 }
874
875 /*
876 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
877 */
878 static void
879 zvol_insert(zvol_state_t *zv_insert)
880 {
881 zvol_state_t *zv = NULL;
882
883 ASSERT(MUTEX_HELD(&zvol_state_lock));
884 ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
885 for (zv = list_head(&zvol_state_list); zv != NULL;
886 zv = list_next(&zvol_state_list, zv)) {
887 if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
888 break;
889 }
890
891 list_insert_before(&zvol_state_list, zv, zv_insert);
892 }
893
894 /*
895 * Simply remove the zvol from the list of zvols.
896 */
897 static void
898 zvol_remove(zvol_state_t *zv_remove)
899 {
900 ASSERT(MUTEX_HELD(&zvol_state_lock));
901 list_remove(&zvol_state_list, zv_remove);
902 }
903
904 static int
905 zvol_first_open(zvol_state_t *zv)
906 {
907 objset_t *os;
908 uint64_t volsize;
909 int locked = 0;
910 int error;
911 uint64_t ro;
912
913 /*
914 * In all other cases the spa_namespace_lock is taken before the
915 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
916 * function calls fops->open() with the bdev->bd_mutex lock held.
917 *
918 * To avoid a potential lock inversion deadlock we preemptively
919 * try to take the spa_namespace_lock. Normally it will not
920 * be contended and this is safe because spa_open_common() handles
921 * the case where the caller already holds the spa_namespace_lock.
922 *
923 * When it is contended we risk a lock inversion if we were to
924 * block waiting for the lock. Luckily, the __blkdev_get()
925 * function allows us to return -ERESTARTSYS which will result in
926 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
927 * called again. This process can be repeated safely until both
928 * locks are acquired.
929 */
930 if (!mutex_owned(&spa_namespace_lock)) {
931 locked = mutex_tryenter(&spa_namespace_lock);
932 if (!locked)
933 return (-SET_ERROR(ERESTARTSYS));
934 }
935
936 /* lie and say we're read-only */
937 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
938 if (error)
939 goto out_mutex;
940
941 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
942 if (error) {
943 dmu_objset_disown(os, zvol_tag);
944 goto out_mutex;
945 }
946
947 zv->zv_objset = os;
948 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
949 if (error) {
950 dmu_objset_disown(os, zvol_tag);
951 goto out_mutex;
952 }
953
954 set_capacity(zv->zv_disk, volsize >> 9);
955 zv->zv_volsize = volsize;
956 zv->zv_zilog = zil_open(os, zvol_get_data);
957
958 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
959 if (ro || dmu_objset_is_snapshot(os) ||
960 !spa_writeable(dmu_objset_spa(os))) {
961 set_disk_ro(zv->zv_disk, 1);
962 zv->zv_flags |= ZVOL_RDONLY;
963 } else {
964 set_disk_ro(zv->zv_disk, 0);
965 zv->zv_flags &= ~ZVOL_RDONLY;
966 }
967
968 out_mutex:
969 if (locked)
970 mutex_exit(&spa_namespace_lock);
971
972 return (-error);
973 }
974
975 static void
976 zvol_last_close(zvol_state_t *zv)
977 {
978 zil_close(zv->zv_zilog);
979 zv->zv_zilog = NULL;
980
981 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
982 zv->zv_dbuf = NULL;
983
984 /*
985 * Evict cached data
986 */
987 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
988 !(zv->zv_flags & ZVOL_RDONLY))
989 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
990 (void) dmu_objset_evict_dbufs(zv->zv_objset);
991
992 dmu_objset_disown(zv->zv_objset, zvol_tag);
993 zv->zv_objset = NULL;
994 }
995
996 static int
997 zvol_open(struct block_device *bdev, fmode_t flag)
998 {
999 zvol_state_t *zv = bdev->bd_disk->private_data;
1000 int error = 0, drop_mutex = 0;
1001
1002 /*
1003 * If the caller is already holding the mutex do not take it
1004 * again; this happens as part of zvol_create_minor().
1005 * Once add_disk() is called the device is live and the kernel
1006 * will attempt to open it to read the partition information.
1007 */
1008 if (!mutex_owned(&zvol_state_lock)) {
1009 mutex_enter(&zvol_state_lock);
1010 drop_mutex = 1;
1011 }
1012
1013 ASSERT3P(zv, !=, NULL);
1014
1015 if (zv->zv_open_count == 0) {
1016 error = zvol_first_open(zv);
1017 if (error)
1018 goto out_mutex;
1019 }
1020
1021 if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
1022 error = -EROFS;
1023 goto out_open_count;
1024 }
1025
1026 zv->zv_open_count++;
1027
1028 out_open_count:
1029 if (zv->zv_open_count == 0)
1030 zvol_last_close(zv);
1031
1032 out_mutex:
1033 if (drop_mutex)
1034 mutex_exit(&zvol_state_lock);
1035
1036 check_disk_change(bdev);
1037
1038 return (error);
1039 }
1040
1041 #ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1042 static void
1043 #else
1044 static int
1045 #endif
1046 zvol_release(struct gendisk *disk, fmode_t mode)
1047 {
1048 zvol_state_t *zv = disk->private_data;
1049 int drop_mutex = 0;
1050
1051 if (!mutex_owned(&zvol_state_lock)) {
1052 mutex_enter(&zvol_state_lock);
1053 drop_mutex = 1;
1054 }
1055
1056 ASSERT3P(zv, !=, NULL);
1057 ASSERT3U(zv->zv_open_count, >, 0);
1058 zv->zv_open_count--;
1059 if (zv->zv_open_count == 0)
1060 zvol_last_close(zv);
1061
1062 if (drop_mutex)
1063 mutex_exit(&zvol_state_lock);
1064
1065 #ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
1066 return (0);
1067 #endif
1068 }
1069
1070 static int
1071 zvol_ioctl(struct block_device *bdev, fmode_t mode,
1072 unsigned int cmd, unsigned long arg)
1073 {
1074 zvol_state_t *zv = bdev->bd_disk->private_data;
1075 int error = 0;
1076
1077 if (zv == NULL)
1078 return (-SET_ERROR(ENXIO));
1079
1080 switch (cmd) {
1081 case BLKFLSBUF:
1082 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1083 break;
1084 case BLKZNAME:
1085 error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
1086 break;
1087
1088 default:
1089 error = -ENOTTY;
1090 break;
1091
1092 }
1093
1094 return (error);
1095 }
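/*
 * Userspace usage sketch (illustrative, not part of this file): the dataset
 * name backing an open zvol can be read with the BLKZNAME ioctl, e.g.
 *
 *	char name[MAXNAMELEN];
 *	if (ioctl(fd, BLKZNAME, name) == 0)
 *		printf("%s\n", name);
 *
 * BLKFLSBUF simply forces a zil_commit() of any outstanding log records.
 */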
1096
1097 #ifdef CONFIG_COMPAT
1098 static int
1099 zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
1100 unsigned cmd, unsigned long arg)
1101 {
1102 return zvol_ioctl(bdev, mode, cmd, arg);
1103 }
1104 #else
1105 #define zvol_compat_ioctl NULL
1106 #endif
1107
1108 static int zvol_media_changed(struct gendisk *disk)
1109 {
1110 zvol_state_t *zv = disk->private_data;
1111
1112 return zv->zv_changed;
1113 }
1114
1115 static int zvol_revalidate_disk(struct gendisk *disk)
1116 {
1117 zvol_state_t *zv = disk->private_data;
1118
1119 zv->zv_changed = 0;
1120 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1121
1122 return 0;
1123 }
1124
1125 /*
1126 * Provide a simple virtual geometry for legacy compatibility. For devices
1127 * smaller than 1 MiB a small head and sector count is used to allow very
1128 * tiny devices. For devices over 1 MiB a standard head and sector count
1129 * is used to keep the cylinder count reasonable.
1130 */
1131 static int
1132 zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1133 {
1134 zvol_state_t *zv = bdev->bd_disk->private_data;
1135 sector_t sectors = get_capacity(zv->zv_disk);
1136
1137 if (sectors > 2048) {
1138 geo->heads = 16;
1139 geo->sectors = 63;
1140 } else {
1141 geo->heads = 2;
1142 geo->sectors = 4;
1143 }
1144
1145 geo->start = 0;
1146 geo->cylinders = sectors / (geo->heads * geo->sectors);
1147
1148 return 0;
1149 }
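/*
 * Worked example (illustrative): a 1 GiB zvol reports 2097152 512-byte
 * sectors, so the geometry above becomes heads=16, sectors=63 and
 * cylinders = 2097152 / (16 * 63) = 2080.  A 512 KiB zvol (1024 sectors)
 * instead gets heads=2, sectors=4 and cylinders=128.
 */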
1150
1151 static struct kobject *
1152 zvol_probe(dev_t dev, int *part, void *arg)
1153 {
1154 zvol_state_t *zv;
1155 struct kobject *kobj;
1156
1157 mutex_enter(&zvol_state_lock);
1158 zv = zvol_find_by_dev(dev);
1159 kobj = zv ? get_disk(zv->zv_disk) : NULL;
1160 mutex_exit(&zvol_state_lock);
1161
1162 return kobj;
1163 }
1164
1165 #ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
1166 static struct block_device_operations zvol_ops = {
1167 .open = zvol_open,
1168 .release = zvol_release,
1169 .ioctl = zvol_ioctl,
1170 .compat_ioctl = zvol_compat_ioctl,
1171 .media_changed = zvol_media_changed,
1172 .revalidate_disk = zvol_revalidate_disk,
1173 .getgeo = zvol_getgeo,
1174 .owner = THIS_MODULE,
1175 };
1176
1177 #else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1178
1179 static int
1180 zvol_open_by_inode(struct inode *inode, struct file *file)
1181 {
1182 return zvol_open(inode->i_bdev, file->f_mode);
1183 }
1184
1185 static int
1186 zvol_release_by_inode(struct inode *inode, struct file *file)
1187 {
1188 return zvol_release(inode->i_bdev->bd_disk, file->f_mode);
1189 }
1190
1191 static int
1192 zvol_ioctl_by_inode(struct inode *inode, struct file *file,
1193 unsigned int cmd, unsigned long arg)
1194 {
1195 if (file == NULL || inode == NULL)
1196 return -EINVAL;
1197 return zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg);
1198 }
1199
1200 # ifdef CONFIG_COMPAT
1201 static long
1202 zvol_compat_ioctl_by_inode(struct file *file,
1203 unsigned int cmd, unsigned long arg)
1204 {
1205 if (file == NULL)
1206 return -EINVAL;
1207 return zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
1208 file->f_mode, cmd, arg);
1209 }
1210 # else
1211 # define zvol_compat_ioctl_by_inode NULL
1212 # endif
1213
1214 static struct block_device_operations zvol_ops = {
1215 .open = zvol_open_by_inode,
1216 .release = zvol_release_by_inode,
1217 .ioctl = zvol_ioctl_by_inode,
1218 .compat_ioctl = zvol_compat_ioctl_by_inode,
1219 .media_changed = zvol_media_changed,
1220 .revalidate_disk = zvol_revalidate_disk,
1221 .getgeo = zvol_getgeo,
1222 .owner = THIS_MODULE,
1223 };
1224 #endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1225
1226 /*
1227 * Allocate memory for a new zvol_state_t and set up the required
1228 * request queue and generic disk structures for the block device.
1229 */
1230 static zvol_state_t *
1231 zvol_alloc(dev_t dev, const char *name)
1232 {
1233 zvol_state_t *zv;
1234 int error = 0;
1235
1236 zv = kmem_zalloc(sizeof (zvol_state_t), KM_PUSHPAGE);
1237
1238 spin_lock_init(&zv->zv_lock);
1239 list_link_init(&zv->zv_next);
1240
1241 zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
1242 if (zv->zv_queue == NULL)
1243 goto out_kmem;
1244
1245 #ifdef HAVE_ELEVATOR_CHANGE
1246 error = elevator_change(zv->zv_queue, "noop");
1247 #endif /* HAVE_ELEVATOR_CHANGE */
1248 if (error) {
1249 printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
1250 "noop", name, error);
1251 goto out_queue;
1252 }
1253
1254 #ifdef HAVE_BLK_QUEUE_FLUSH
1255 blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
1256 #else
1257 blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
1258 #endif /* HAVE_BLK_QUEUE_FLUSH */
1259
1260 zv->zv_disk = alloc_disk(ZVOL_MINORS);
1261 if (zv->zv_disk == NULL)
1262 goto out_queue;
1263
1264 zv->zv_queue->queuedata = zv;
1265 zv->zv_dev = dev;
1266 zv->zv_open_count = 0;
1267 strlcpy(zv->zv_name, name, MAXNAMELEN);
1268
1269 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
1270 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
1271 sizeof (rl_t), offsetof(rl_t, r_node));
1272 zv->zv_znode.z_is_zvol = TRUE;
1273
1274 zv->zv_disk->major = zvol_major;
1275 zv->zv_disk->first_minor = (dev & MINORMASK);
1276 zv->zv_disk->fops = &zvol_ops;
1277 zv->zv_disk->private_data = zv;
1278 zv->zv_disk->queue = zv->zv_queue;
1279 snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
1280 ZVOL_DEV_NAME, (dev & MINORMASK));
1281
1282 return zv;
1283
1284 out_queue:
1285 blk_cleanup_queue(zv->zv_queue);
1286 out_kmem:
1287 kmem_free(zv, sizeof (zvol_state_t));
1288
1289 return NULL;
1290 }
1291
1292 /*
1293 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
1294 */
1295 static void
1296 zvol_free(zvol_state_t *zv)
1297 {
1298 avl_destroy(&zv->zv_znode.z_range_avl);
1299 mutex_destroy(&zv->zv_znode.z_range_lock);
1300
1301 del_gendisk(zv->zv_disk);
1302 blk_cleanup_queue(zv->zv_queue);
1303 put_disk(zv->zv_disk);
1304
1305 kmem_free(zv, sizeof (zvol_state_t));
1306 }
1307
1308 static int
1309 __zvol_snapdev_hidden(const char *name)
1310 {
1311 uint64_t snapdev;
1312 char *parent;
1313 char *atp;
1314 int error = 0;
1315
1316 parent = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
1317 (void) strlcpy(parent, name, MAXPATHLEN);
1318
1319 if ((atp = strrchr(parent, '@')) != NULL) {
1320 *atp = '\0';
1321 error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
1322 if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
1323 error = SET_ERROR(ENODEV);
1324 }
1325 kmem_free(parent, MAXPATHLEN);
1326 return (error);
1327 }
1328
1329 static int
1330 __zvol_create_minor(const char *name, boolean_t ignore_snapdev)
1331 {
1332 zvol_state_t *zv;
1333 objset_t *os;
1334 dmu_object_info_t *doi;
1335 uint64_t volsize;
1336 unsigned minor = 0;
1337 int error = 0;
1338
1339 ASSERT(MUTEX_HELD(&zvol_state_lock));
1340
1341 zv = zvol_find_by_name(name);
1342 if (zv) {
1343 error = SET_ERROR(EEXIST);
1344 goto out;
1345 }
1346
1347 if (ignore_snapdev == B_FALSE) {
1348 error = __zvol_snapdev_hidden(name);
1349 if (error)
1350 goto out;
1351 }
1352
1353 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_PUSHPAGE);
1354
1355 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
1356 if (error)
1357 goto out_doi;
1358
1359 error = dmu_object_info(os, ZVOL_OBJ, doi);
1360 if (error)
1361 goto out_dmu_objset_disown;
1362
1363 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1364 if (error)
1365 goto out_dmu_objset_disown;
1366
1367 error = zvol_find_minor(&minor);
1368 if (error)
1369 goto out_dmu_objset_disown;
1370
1371 zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1372 if (zv == NULL) {
1373 error = SET_ERROR(EAGAIN);
1374 goto out_dmu_objset_disown;
1375 }
1376
1377 if (dmu_objset_is_snapshot(os))
1378 zv->zv_flags |= ZVOL_RDONLY;
1379
1380 zv->zv_volblocksize = doi->doi_data_block_size;
1381 zv->zv_volsize = volsize;
1382 zv->zv_objset = os;
1383
1384 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1385
1386 blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
1387 blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
1388 blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
1389 blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
1390 blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
1391 #ifdef HAVE_BLK_QUEUE_DISCARD
1392 blk_queue_max_discard_sectors(zv->zv_queue,
1393 (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
1394 blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
1395 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
1396 #endif
1397 #ifdef HAVE_BLK_QUEUE_NONROT
1398 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
1399 #endif
1400
1401 if (spa_writeable(dmu_objset_spa(os))) {
1402 if (zil_replay_disable)
1403 zil_destroy(dmu_objset_zil(os), B_FALSE);
1404 else
1405 zil_replay(os, zv, zvol_replay_vector);
1406 }
1407
1408 zv->zv_objset = NULL;
1409 out_dmu_objset_disown:
1410 dmu_objset_disown(os, zvol_tag);
1411 out_doi:
1412 kmem_free(doi, sizeof(dmu_object_info_t));
1413 out:
1414
1415 if (error == 0) {
1416 zvol_insert(zv);
1417 add_disk(zv->zv_disk);
1418 }
1419
1420 return (error);
1421 }
1422
1423 /*
1424 * Create a block device minor node and set up the linkage between it
1425 * and the specified volume. Once this function returns, the block
1426 * device is live and ready for use.
1427 */
1428 int
1429 zvol_create_minor(const char *name)
1430 {
1431 int error;
1432
1433 mutex_enter(&zvol_state_lock);
1434 error = __zvol_create_minor(name, B_FALSE);
1435 mutex_exit(&zvol_state_lock);
1436
1437 return (error);
1438 }
1439
1440 static int
1441 __zvol_remove_minor(const char *name)
1442 {
1443 zvol_state_t *zv;
1444
1445 ASSERT(MUTEX_HELD(&zvol_state_lock));
1446
1447 zv = zvol_find_by_name(name);
1448 if (zv == NULL)
1449 return (SET_ERROR(ENXIO));
1450
1451 if (zv->zv_open_count > 0)
1452 return (SET_ERROR(EBUSY));
1453
1454 zvol_remove(zv);
1455 zvol_free(zv);
1456
1457 return (0);
1458 }
1459
1460 /*
1461 * Remove a block device minor node for the specified volume.
1462 */
1463 int
1464 zvol_remove_minor(const char *name)
1465 {
1466 int error;
1467
1468 mutex_enter(&zvol_state_lock);
1469 error = __zvol_remove_minor(name);
1470 mutex_exit(&zvol_state_lock);
1471
1472 return (error);
1473 }
1474
1475 /*
1476 * Rename a block device minor node for the specified volume.
1477 */
1478 static void
1479 __zvol_rename_minor(zvol_state_t *zv, const char *newname)
1480 {
1481 int readonly = get_disk_ro(zv->zv_disk);
1482
1483 ASSERT(MUTEX_HELD(&zvol_state_lock));
1484
1485 strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));
1486
1487 /*
1488 * The block device's read-only state is briefly changed causing
1489 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
1490 * the name change and fixes the symlinks. This does not change
1491 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
1492 * changes. This would normally be done using kobject_uevent() but
1493 * that is a GPL-only symbol which is why we need this workaround.
1494 */
1495 set_disk_ro(zv->zv_disk, !readonly);
1496 set_disk_ro(zv->zv_disk, readonly);
1497 }
1498
1499 static int
1500 zvol_create_minors_cb(const char *dsname, void *arg)
1501 {
1502 (void) zvol_create_minor(dsname);
1503
1504 return (0);
1505 }
1506
1507 /*
1508 * Create minors for the specified dataset, including children and snapshots.
1509 */
1510 int
1511 zvol_create_minors(const char *name)
1512 {
1513 int error = 0;
1514
1515 if (!zvol_inhibit_dev)
1516 error = dmu_objset_find((char *)name, zvol_create_minors_cb,
1517 NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1518
1519 return (SET_ERROR(error));
1520 }
1521
1522 /*
1523 * Remove minors for the specified dataset, including children and snapshots.
1524 */
1525 void
1526 zvol_remove_minors(const char *name)
1527 {
1528 zvol_state_t *zv, *zv_next;
1529 int namelen = ((name) ? strlen(name) : 0);
1530
1531 if (zvol_inhibit_dev)
1532 return;
1533
1534 mutex_enter(&zvol_state_lock);
1535
1536 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1537 zv_next = list_next(&zvol_state_list, zv);
1538
1539 if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
1540 (strncmp(zv->zv_name, name, namelen) == 0 &&
1541 zv->zv_name[namelen] == '/')) {
1542 zvol_remove(zv);
1543 zvol_free(zv);
1544 }
1545 }
1546
1547 mutex_exit(&zvol_state_lock);
1548 }
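/*
 * Matching example (illustrative): zvol_remove_minors("tank/vol") removes
 * the minor for "tank/vol" itself and for children such as "tank/vol/sub"
 * (prefix match followed by '/'), but leaves "tank/volume" untouched;
 * passing NULL, as zvol_fini() does, removes every minor.
 */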
1549
1550 /*
1551 * Rename minors for the specified dataset, including children and snapshots.
1552 */
1553 void
1554 zvol_rename_minors(const char *oldname, const char *newname)
1555 {
1556 zvol_state_t *zv, *zv_next;
1557 int oldnamelen, newnamelen;
1558 char *name;
1559
1560 if (zvol_inhibit_dev)
1561 return;
1562
1563 oldnamelen = strlen(oldname);
1564 newnamelen = strlen(newname);
1565 name = kmem_alloc(MAXNAMELEN, KM_PUSHPAGE);
1566
1567 mutex_enter(&zvol_state_lock);
1568
1569 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1570 zv_next = list_next(&zvol_state_list, zv);
1571
1572 if (strcmp(zv->zv_name, oldname) == 0) {
1573 __zvol_rename_minor(zv, newname);
1574 } else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
1575 (zv->zv_name[oldnamelen] == '/' ||
1576 zv->zv_name[oldnamelen] == '@')) {
1577 snprintf(name, MAXNAMELEN, "%s%c%s", newname,
1578 zv->zv_name[oldnamelen],
1579 zv->zv_name + oldnamelen + 1);
1580 __zvol_rename_minor(zv, name);
1581 }
1582 }
1583
1584 mutex_exit(&zvol_state_lock);
1585
1586 kmem_free(name, MAXNAMELEN);
1587 }
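/*
 * Matching example (illustrative): renaming "tank/vol" to "tank/new"
 * rewrites "tank/vol" itself, children such as "tank/vol/sub" and snapshots
 * such as "tank/vol@snap" into "tank/new", "tank/new/sub" and
 * "tank/new@snap" respectively; unrelated names like "tank/volume" are left
 * alone because the character after the prefix is neither '/' nor '@'.
 */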
1588
1589 static int
1590 snapdev_snapshot_changed_cb(const char *dsname, void *arg) {
1591 uint64_t snapdev = *(uint64_t *) arg;
1592
1593 if (strchr(dsname, '@') == NULL)
1594 return (0);
1595
1596 switch (snapdev) {
1597 case ZFS_SNAPDEV_VISIBLE:
1598 mutex_enter(&zvol_state_lock);
1599 (void) __zvol_create_minor(dsname, B_TRUE);
1600 mutex_exit(&zvol_state_lock);
1601 break;
1602 case ZFS_SNAPDEV_HIDDEN:
1603 (void) zvol_remove_minor(dsname);
1604 break;
1605 }
1606
1607 return (0);
1608 }
1609
1610 int
1611 zvol_set_snapdev(const char *dsname, uint64_t snapdev) {
1612 (void) dmu_objset_find((char *) dsname, snapdev_snapshot_changed_cb,
1613 &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
1614 /* caller should continue to modify snapdev property */
1615 return (-1);
1616 }
1617
1618
1619 int
1620 zvol_init(void)
1621 {
1622 int error;
1623
1624 list_create(&zvol_state_list, sizeof (zvol_state_t),
1625 offsetof(zvol_state_t, zv_next));
1626 mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1627
1628 zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
1629 zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
1630 if (zvol_taskq == NULL) {
1631 printk(KERN_INFO "ZFS: taskq_create() failed\n");
1632 error = -ENOMEM;
1633 goto out1;
1634 }
1635
1636 error = register_blkdev(zvol_major, ZVOL_DRIVER);
1637 if (error) {
1638 printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
1639 goto out2;
1640 }
1641
1642 blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
1643 THIS_MODULE, zvol_probe, NULL, NULL);
1644
1645 return (0);
1646
1647 out2:
1648 taskq_destroy(zvol_taskq);
1649 out1:
1650 mutex_destroy(&zvol_state_lock);
1651 list_destroy(&zvol_state_list);
1652
1653 return (error);
1654 }
1655
1656 void
1657 zvol_fini(void)
1658 {
1659 zvol_remove_minors(NULL);
1660 blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
1661 unregister_blkdev(zvol_major, ZVOL_DRIVER);
1662 taskq_destroy(zvol_taskq);
1663 mutex_destroy(&zvol_state_lock);
1664 list_destroy(&zvol_state_list);
1665 }
1666
1667 module_param(zvol_inhibit_dev, uint, 0644);
1668 MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1669
1670 module_param(zvol_major, uint, 0444);
1671 MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1672
1673 module_param(zvol_threads, uint, 0444);
1674 MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");
1675
1676 module_param(zvol_max_discard_blocks, ulong, 0444);
1677 MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard at once");
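
/*
 * Module parameter usage sketch (illustrative): all of these tunables except
 * zvol_inhibit_dev are read-only at runtime, so they are normally set at
 * module load time, e.g. "modprobe zfs zvol_major=230 zvol_threads=8" or via
 * an option line in /etc/modprobe.d/zfs.conf.
 */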