1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 *
27 * ZFS volume emulation driver.
28 *
29 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
30 * Volumes are accessed through the symbolic links named:
31 *
32 * /dev/<pool_name>/<dataset_name>
33 *
34 * Volumes are persistent through reboot and module load. No user command
35 * needs to be run before opening and using a device.
36 */
37
38#include <sys/dmu_traverse.h>
39#include <sys/dsl_dataset.h>
40#include <sys/dsl_prop.h>
41#include <sys/zap.h>
42#include <sys/zil_impl.h>
43#include <sys/zio.h>
44#include <sys/zfs_rlock.h>
45#include <sys/zfs_znode.h>
46#include <sys/zvol.h>
61e90960 47#include <linux/blkdev_compat.h>
60101509 48
74497b7a 49unsigned int zvol_inhibit_dev = 0;
60101509 50unsigned int zvol_major = ZVOL_MAJOR;
dde9380a 51unsigned int zvol_threads = 32;
7c0e5708 52unsigned long zvol_max_discard_blocks = 16384;
53
54static taskq_t *zvol_taskq;
55static kmutex_t zvol_state_lock;
56static list_t zvol_state_list;
57static char *zvol_tag = "zvol_tag";
58
59/*
60 * The in-core state of each volume.
61 */
62typedef struct zvol_state {
4c0d8e50 63 char zv_name[MAXNAMELEN]; /* name */
64 uint64_t zv_volsize; /* advertised space */
65 uint64_t zv_volblocksize;/* volume block size */
66 objset_t *zv_objset; /* objset handle */
67 uint32_t zv_flags; /* ZVOL_* flags */
68 uint32_t zv_open_count; /* open counts */
69 uint32_t zv_changed; /* disk changed */
70 zilog_t *zv_zilog; /* ZIL handle */
71 znode_t zv_znode; /* for range locking */
72 dmu_buf_t *zv_dbuf; /* bonus handle */
73 dev_t zv_dev; /* device id */
74 struct gendisk *zv_disk; /* generic disk */
75 struct request_queue *zv_queue; /* request queue */
76 spinlock_t zv_lock; /* request queue lock */
77 list_node_t zv_next; /* next zvol_state_t linkage */
78} zvol_state_t;
79
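/* zv_flags bit: volume may not be written (readonly property or snapshot) */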
80#define ZVOL_RDONLY 0x1
81
82/*
83 * Find the next available range of ZVOL_MINORS minor numbers. The
84 * zvol_state_list is kept in ascending minor order so we simply need
85 * to scan the list for the first gap in the sequence. This allows us
 86 * to recycle minor numbers as devices are created and removed.
87 */
88static int
89zvol_find_minor(unsigned *minor)
90{
91 zvol_state_t *zv;
92
93 *minor = 0;
94 ASSERT(MUTEX_HELD(&zvol_state_lock));
95 for (zv = list_head(&zvol_state_list); zv != NULL;
96 zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
97 if (MINOR(zv->zv_dev) != MINOR(*minor))
98 break;
99 }
100
101 /* All minors are in use */
102 if (*minor >= (1 << MINORBITS))
103 return ENXIO;
104
105 return 0;
106}
107
108/*
109 * Find a zvol_state_t given the full major+minor dev_t.
110 */
111static zvol_state_t *
112zvol_find_by_dev(dev_t dev)
113{
114 zvol_state_t *zv;
115
116 ASSERT(MUTEX_HELD(&zvol_state_lock));
117 for (zv = list_head(&zvol_state_list); zv != NULL;
118 zv = list_next(&zvol_state_list, zv)) {
119 if (zv->zv_dev == dev)
120 return zv;
121 }
122
123 return NULL;
124}
125
126/*
127 * Find a zvol_state_t given the name provided at zvol_alloc() time.
128 */
129static zvol_state_t *
130zvol_find_by_name(const char *name)
131{
132 zvol_state_t *zv;
133
134 ASSERT(MUTEX_HELD(&zvol_state_lock));
135 for (zv = list_head(&zvol_state_list); zv != NULL;
136 zv = list_next(&zvol_state_list, zv)) {
4c0d8e50 137 if (!strncmp(zv->zv_name, name, MAXNAMELEN))
138 return zv;
139 }
140
141 return NULL;
142}
143
144/*
 145 * ZFS_IOC_CREATE callback handles DMU zvol and ZAP object creation.
146 */
147void
148zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
149{
150 zfs_creat_t *zct = arg;
151 nvlist_t *nvprops = zct->zct_props;
152 int error;
153 uint64_t volblocksize, volsize;
154
155 VERIFY(nvlist_lookup_uint64(nvprops,
156 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
157 if (nvlist_lookup_uint64(nvprops,
158 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
159 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
160
161 /*
162 * These properties must be removed from the list so the generic
163 * property setting step won't apply to them.
164 */
165 VERIFY(nvlist_remove_all(nvprops,
166 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
167 (void) nvlist_remove_all(nvprops,
168 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
169
170 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
171 DMU_OT_NONE, 0, tx);
172 ASSERT(error == 0);
173
174 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
175 DMU_OT_NONE, 0, tx);
176 ASSERT(error == 0);
177
178 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
179 ASSERT(error == 0);
180}
181
182/*
183 * ZFS_IOC_OBJSET_STATS entry point.
184 */
185int
186zvol_get_stats(objset_t *os, nvlist_t *nv)
187{
188 int error;
189 dmu_object_info_t *doi;
190 uint64_t val;
191
192 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
193 if (error)
194 return (error);
195
196 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
197 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
198 error = dmu_object_info(os, ZVOL_OBJ, doi);
199
200 if (error == 0) {
201 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
202 doi->doi_data_block_size);
203 }
204
205 kmem_free(doi, sizeof(dmu_object_info_t));
206
207 return (error);
208}
209
210/*
211 * Sanity check volume size.
212 */
213int
214zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
215{
216 if (volsize == 0)
217 return (EINVAL);
218
219 if (volsize % blocksize != 0)
220 return (EINVAL);
221
222#ifdef _ILP32
223 if (volsize - 1 > MAXOFFSET_T)
224 return (EOVERFLOW);
225#endif
226 return (0);
227}
228
229/*
 230 * Ensure the ZAP is flushed, then inform the VFS of the capacity change.
231 */
232static int
df554c14 233zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
234{
235 struct block_device *bdev;
236 dmu_tx_t *tx;
237 int error;
238
239 ASSERT(MUTEX_HELD(&zvol_state_lock));
240
df554c14 241 tx = dmu_tx_create(os);
242 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
243 error = dmu_tx_assign(tx, TXG_WAIT);
244 if (error) {
245 dmu_tx_abort(tx);
246 return (error);
247 }
248
df554c14 249 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
250 &volsize, tx);
251 dmu_tx_commit(tx);
252
253 if (error)
254 return (error);
255
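	/* Free any blocks that now lie beyond the new end of the volume. */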
df554c14 256 error = dmu_free_long_range(os,
257 ZVOL_OBJ, volsize, DMU_OBJECT_END);
258 if (error)
259 return (error);
260
261 bdev = bdget_disk(zv->zv_disk, 0);
262 if (!bdev)
263 return (EIO);
264/*
265 * 2.6.28 API change
266 * Added check_disk_size_change() helper function.
267 */
268#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
269 set_capacity(zv->zv_disk, volsize >> 9);
270 zv->zv_volsize = volsize;
271 check_disk_size_change(zv->zv_disk, bdev);
272#else
273 zv->zv_volsize = volsize;
274 zv->zv_changed = 1;
275 (void) check_disk_change(bdev);
276#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */
60101509 277
278 bdput(bdev);
279
280 return (0);
281}
282
283/*
 284 * ZFS_PROP_VOLSIZE set entry point.
285 */
286int
287zvol_set_volsize(const char *name, uint64_t volsize)
288{
289 zvol_state_t *zv;
290 dmu_object_info_t *doi;
291 objset_t *os = NULL;
292 uint64_t readonly;
293 int error;
294
295 mutex_enter(&zvol_state_lock);
296
297 zv = zvol_find_by_name(name);
298 if (zv == NULL) {
299 error = ENXIO;
300 goto out;
301 }
302
303 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
304
305 error = dmu_objset_hold(name, FTAG, &os);
306 if (error)
307 goto out_doi;
308
309 if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
310 (error = zvol_check_volsize(volsize,doi->doi_data_block_size)) != 0)
311 goto out_doi;
312
313 VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
314 if (readonly) {
315 error = EROFS;
316 goto out_doi;
317 }
318
319 if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
320 error = EROFS;
321 goto out_doi;
322 }
323
df554c14 324 error = zvol_update_volsize(zv, volsize, os);
325out_doi:
326 kmem_free(doi, sizeof(dmu_object_info_t));
327out:
328 if (os)
329 dmu_objset_rele(os, FTAG);
330
331 mutex_exit(&zvol_state_lock);
332
333 return (error);
334}
335
336/*
337 * Sanity check volume block size.
338 */
339int
340zvol_check_volblocksize(uint64_t volblocksize)
341{
342 if (volblocksize < SPA_MINBLOCKSIZE ||
343 volblocksize > SPA_MAXBLOCKSIZE ||
344 !ISP2(volblocksize))
345 return (EDOM);
346
347 return (0);
348}
349
350/*
351 * Set ZFS_PROP_VOLBLOCKSIZE set entry point.
352 */
353int
354zvol_set_volblocksize(const char *name, uint64_t volblocksize)
355{
356 zvol_state_t *zv;
357 dmu_tx_t *tx;
358 int error;
359
360 mutex_enter(&zvol_state_lock);
361
362 zv = zvol_find_by_name(name);
363 if (zv == NULL) {
364 error = ENXIO;
365 goto out;
366 }
367
368 if (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY)) {
369 error = EROFS;
370 goto out;
371 }
372
373 tx = dmu_tx_create(zv->zv_objset);
374 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
375 error = dmu_tx_assign(tx, TXG_WAIT);
376 if (error) {
377 dmu_tx_abort(tx);
378 } else {
379 error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
380 volblocksize, 0, tx);
381 if (error == ENOTSUP)
382 error = EBUSY;
383 dmu_tx_commit(tx);
384 if (error == 0)
385 zv->zv_volblocksize = volblocksize;
386 }
387out:
388 mutex_exit(&zvol_state_lock);
389
390 return (error);
391}
392
393/*
394 * Replay a TX_WRITE ZIL transaction that didn't get committed
 395 * after a system failure.
396 */
397static int
398zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
399{
400 objset_t *os = zv->zv_objset;
401 char *data = (char *)(lr + 1); /* data follows lr_write_t */
402 uint64_t off = lr->lr_offset;
403 uint64_t len = lr->lr_length;
404 dmu_tx_t *tx;
405 int error;
406
407 if (byteswap)
408 byteswap_uint64_array(lr, sizeof (*lr));
409
410 tx = dmu_tx_create(os);
411 dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
412 error = dmu_tx_assign(tx, TXG_WAIT);
413 if (error) {
414 dmu_tx_abort(tx);
415 } else {
416 dmu_write(os, ZVOL_OBJ, off, len, data, tx);
417 dmu_tx_commit(tx);
418 }
419
420 return (error);
421}
422
423static int
424zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
425{
426 return (ENOTSUP);
427}
428
429/*
430 * Callback vectors for replaying records.
431 * Only TX_WRITE is needed for zvol.
432 */
433zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
434 (zil_replay_func_t *)zvol_replay_err, /* no such transaction type */
435 (zil_replay_func_t *)zvol_replay_err, /* TX_CREATE */
436 (zil_replay_func_t *)zvol_replay_err, /* TX_MKDIR */
437 (zil_replay_func_t *)zvol_replay_err, /* TX_MKXATTR */
438 (zil_replay_func_t *)zvol_replay_err, /* TX_SYMLINK */
439 (zil_replay_func_t *)zvol_replay_err, /* TX_REMOVE */
440 (zil_replay_func_t *)zvol_replay_err, /* TX_RMDIR */
441 (zil_replay_func_t *)zvol_replay_err, /* TX_LINK */
442 (zil_replay_func_t *)zvol_replay_err, /* TX_RENAME */
443 (zil_replay_func_t *)zvol_replay_write, /* TX_WRITE */
444 (zil_replay_func_t *)zvol_replay_err, /* TX_TRUNCATE */
445 (zil_replay_func_t *)zvol_replay_err, /* TX_SETATTR */
446 (zil_replay_func_t *)zvol_replay_err, /* TX_ACL */
447};
448
449/*
450 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
451 *
452 * We store data in the log buffers if it's small enough.
453 * Otherwise we will later flush the data out via dmu_sync().
454 */
455ssize_t zvol_immediate_write_sz = 32768;
456
457static void
458zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
459 uint64_t offset, uint64_t size, int sync)
460{
461 uint32_t blocksize = zv->zv_volblocksize;
462 zilog_t *zilog = zv->zv_zilog;
463 boolean_t slogging;
ab85f845 464 ssize_t immediate_write_sz;
465
466 if (zil_replaying(zilog, tx))
467 return;
468
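	/*
	 * With logbias=throughput we never embed write data in the log
	 * record; a separate log device is only consulted when the
	 * logbias is latency.
	 */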
469 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
470 ? 0 : zvol_immediate_write_sz;
471 slogging = spa_has_slogs(zilog->zl_spa) &&
472 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
473
474 while (size) {
475 itx_t *itx;
476 lr_write_t *lr;
477 ssize_t len;
478 itx_wr_state_t write_state;
479
480 /*
481 * Unlike zfs_log_write() we can be called with
482 * up to DMU_MAX_ACCESS/2 (5MB) writes.
483 */
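		/*
		 * Pick how the payload travels with the itx: WR_INDIRECT
		 * syncs whole aligned blocks via dmu_sync(), WR_COPIED
		 * embeds the data in the log record for synchronous
		 * writes, and WR_NEED_COPY defers the copy to commit time.
		 */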
ab85f845 484 if (blocksize > immediate_write_sz && !slogging &&
485 size >= blocksize && offset % blocksize == 0) {
486 write_state = WR_INDIRECT; /* uses dmu_sync */
487 len = blocksize;
488 } else if (sync) {
489 write_state = WR_COPIED;
490 len = MIN(ZIL_MAX_LOG_DATA, size);
491 } else {
492 write_state = WR_NEED_COPY;
493 len = MIN(ZIL_MAX_LOG_DATA, size);
494 }
495
496 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
497 (write_state == WR_COPIED ? len : 0));
498 lr = (lr_write_t *)&itx->itx_lr;
499 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
500 ZVOL_OBJ, offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
501 zil_itx_destroy(itx);
502 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
503 lr = (lr_write_t *)&itx->itx_lr;
504 write_state = WR_NEED_COPY;
505 }
506
507 itx->itx_wr_state = write_state;
508 if (write_state == WR_NEED_COPY)
509 itx->itx_sod += len;
510 lr->lr_foid = ZVOL_OBJ;
511 lr->lr_offset = offset;
512 lr->lr_length = len;
513 lr->lr_blkoff = 0;
514 BP_ZERO(&lr->lr_blkptr);
515
516 itx->itx_private = zv;
517 itx->itx_sync = sync;
518
519 (void) zil_itx_assign(zilog, itx, tx);
520
521 offset += len;
522 size -= len;
523 }
524}
525
526/*
527 * Common write path running under the zvol taskq context. This function
 528 * is responsible for copying the request structure data into the DMU and
529 * signaling the request queue with the result of the copy.
530 */
531static void
532zvol_write(void *arg)
533{
534 struct request *req = (struct request *)arg;
535 struct request_queue *q = req->q;
536 zvol_state_t *zv = q->queuedata;
537 uint64_t offset = blk_rq_pos(req) << 9;
538 uint64_t size = blk_rq_bytes(req);
539 int error = 0;
540 dmu_tx_t *tx;
541 rl_t *rl;
542
543 /*
544 * Annotate this call path with a flag that indicates that it is
545 * unsafe to use KM_SLEEP during memory allocations due to the
546 * potential for a deadlock. KM_PUSHPAGE should be used instead.
547 */
548 ASSERT(!(current->flags & PF_NOFS));
549 current->flags |= PF_NOFS;
550
551 if (req->cmd_flags & VDEV_REQ_FLUSH)
552 zil_commit(zv->zv_zilog, ZVOL_OBJ);
553
554 /*
555 * Some requests are just for flush and nothing else.
556 */
557 if (size == 0) {
558 blk_end_request(req, 0, size);
8630650a 559 goto out;
560 }
561
562 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);
563
564 tx = dmu_tx_create(zv->zv_objset);
565 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);
566
567 /* This will only fail for ENOSPC */
568 error = dmu_tx_assign(tx, TXG_WAIT);
569 if (error) {
570 dmu_tx_abort(tx);
571 zfs_range_unlock(rl);
572 blk_end_request(req, -error, size);
8630650a 573 goto out;
574 }
575
576 error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
577 if (error == 0)
578 zvol_log_write(zv, tx, offset, size,
579 req->cmd_flags & VDEV_REQ_FUA);
580
581 dmu_tx_commit(tx);
582 zfs_range_unlock(rl);
583
584 if ((req->cmd_flags & VDEV_REQ_FUA) ||
585 zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
586 zil_commit(zv->zv_zilog, ZVOL_OBJ);
587
588 blk_end_request(req, -error, size);
589out:
590 current->flags &= ~PF_NOFS;
591}
592
593#ifdef HAVE_BLK_QUEUE_DISCARD
594static void
595zvol_discard(void *arg)
596{
597 struct request *req = (struct request *)arg;
598 struct request_queue *q = req->q;
599 zvol_state_t *zv = q->queuedata;
600 uint64_t start = blk_rq_pos(req) << 9;
601 uint64_t end = start + blk_rq_bytes(req);
602 int error;
603 rl_t *rl;
604
605 /*
606 * Annotate this call path with a flag that indicates that it is
607 * unsafe to use KM_SLEEP during memory allocations due to the
608 * potential for a deadlock. KM_PUSHPAGE should be used instead.
609 */
610 ASSERT(!(current->flags & PF_NOFS));
611 current->flags |= PF_NOFS;
612
613 if (end > zv->zv_volsize) {
614 blk_end_request(req, -EIO, blk_rq_bytes(req));
8630650a 615 goto out;
616 }
617
618 /*
619 * Align the request to volume block boundaries. If we don't,
620 * then this will force dnode_free_range() to zero out the
621 * unaligned parts, which is slow (read-modify-write) and
622 * useless since we are not freeing any space by doing so.
623 */
624 start = P2ROUNDUP(start, zv->zv_volblocksize);
625 end = P2ALIGN(end, zv->zv_volblocksize);
626
627 if (start >= end) {
628 blk_end_request(req, 0, blk_rq_bytes(req));
8630650a 629 goto out;
630 }
631
089fa91b 632 rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);
30930fba 633
089fa91b 634 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start, end - start);
635
636 /*
637 * TODO: maybe we should add the operation to the log.
638 */
639
640 zfs_range_unlock(rl);
641
089fa91b 642 blk_end_request(req, -error, blk_rq_bytes(req));
643out:
644 current->flags &= ~PF_NOFS;
645}
646#endif /* HAVE_BLK_QUEUE_DISCARD */
647
648/*
649 * Common read path running under the zvol taskq context. This function
 650 * is responsible for copying the requested data out of the DMU and into
 651 * a Linux request structure. It then must signal the request queue with
652 * an error code describing the result of the copy.
653 */
654static void
655zvol_read(void *arg)
656{
657 struct request *req = (struct request *)arg;
658 struct request_queue *q = req->q;
659 zvol_state_t *zv = q->queuedata;
660 uint64_t offset = blk_rq_pos(req) << 9;
661 uint64_t size = blk_rq_bytes(req);
662 int error;
663 rl_t *rl;
664
665 if (size == 0) {
666 blk_end_request(req, 0, size);
667 return;
668 }
669
670 rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
671
672 error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);
673
674 zfs_range_unlock(rl);
675
676 /* convert checksum errors into IO errors */
677 if (error == ECKSUM)
678 error = EIO;
679
680 blk_end_request(req, -error, size);
681}
682
683/*
 684 * The request will be added back to the request queue and retried if
 685 * it cannot be immediately dispatched to the taskq for handling.
686 */
687static inline void
688zvol_dispatch(task_func_t func, struct request *req)
689{
690 if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
691 blk_requeue_request(req->q, req);
692}
693
694/*
695 * Common request path. Rather than registering a custom make_request()
696 * function we use the generic Linux version. This is done because it allows
 697 * us to easily merge read requests which would otherwise be performed
 698 * synchronously by the DMU. This is less critical in the write case where
 699 * the DMU will perform the correct merging within a transaction group.
 700 * Using the generic make_request() also lets us leverage the fact that the
 701 * elevator will ensure correct ordering with regard to barrier IOs. On
 702 * the downside it means that in the write case we end up doing request
 703 * merging twice: once in the elevator and once in the DMU.
704 *
705 * The request handler is called under a spin lock so all the real work
706 * is handed off to be done in the context of the zvol taskq. This function
707 * simply performs basic request sanity checking and hands off the request.
708 */
709static void
710zvol_request(struct request_queue *q)
711{
712 zvol_state_t *zv = q->queuedata;
713 struct request *req;
714 unsigned int size;
715
716 while ((req = blk_fetch_request(q)) != NULL) {
717 size = blk_rq_bytes(req);
718
b18019d2 719 if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
720 get_capacity(zv->zv_disk)) {
721 printk(KERN_INFO
722 "%s: bad access: block=%llu, count=%lu\n",
723 req->rq_disk->disk_name,
724 (long long unsigned)blk_rq_pos(req),
725 (long unsigned)blk_rq_sectors(req));
726 __blk_end_request(req, -EIO, size);
727 continue;
728 }
729
730 if (!blk_fs_request(req)) {
731 printk(KERN_INFO "%s: non-fs cmd\n",
732 req->rq_disk->disk_name);
733 __blk_end_request(req, -EIO, size);
734 continue;
735 }
736
737 switch (rq_data_dir(req)) {
738 case READ:
739 zvol_dispatch(zvol_read, req);
740 break;
741 case WRITE:
742 if (unlikely(get_disk_ro(zv->zv_disk)) ||
743 unlikely(zv->zv_flags & ZVOL_RDONLY)) {
744 __blk_end_request(req, -EROFS, size);
745 break;
746 }
747
748#ifdef HAVE_BLK_QUEUE_DISCARD
749 if (req->cmd_flags & VDEV_REQ_DISCARD) {
750 zvol_dispatch(zvol_discard, req);
751 break;
752 }
753#endif /* HAVE_BLK_QUEUE_DISCARD */
754
755 zvol_dispatch(zvol_write, req);
756 break;
757 default:
758 printk(KERN_INFO "%s: unknown cmd: %d\n",
759 req->rq_disk->disk_name, (int)rq_data_dir(req));
760 __blk_end_request(req, -EIO, size);
761 break;
762 }
763 }
764}
765
766static void
767zvol_get_done(zgd_t *zgd, int error)
768{
769 if (zgd->zgd_db)
770 dmu_buf_rele(zgd->zgd_db, zgd);
771
772 zfs_range_unlock(zgd->zgd_rl);
773
774 if (error == 0 && zgd->zgd_bp)
775 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
776
777 kmem_free(zgd, sizeof (zgd_t));
778}
779
780/*
781 * Get data to generate a TX_WRITE intent log record.
782 */
783static int
784zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
785{
786 zvol_state_t *zv = arg;
787 objset_t *os = zv->zv_objset;
788 uint64_t offset = lr->lr_offset;
789 uint64_t size = lr->lr_length;
790 dmu_buf_t *db;
791 zgd_t *zgd;
792 int error;
793
794 ASSERT(zio != NULL);
795 ASSERT(size != 0);
796
b8d06fca 797 zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
798 zgd->zgd_zilog = zv->zv_zilog;
799 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
800
801 /*
802 * Write records come in two flavors: immediate and indirect.
803 * For small writes it's cheaper to store the data with the
804 * log record (immediate); for large writes it's cheaper to
805 * sync the data and get a pointer to it (indirect) so that
806 * we don't have to write the data twice.
807 */
808 if (buf != NULL) { /* immediate write */
809 error = dmu_read(os, ZVOL_OBJ, offset, size, buf,
810 DMU_READ_NO_PREFETCH);
811 } else {
812 size = zv->zv_volblocksize;
813 offset = P2ALIGN_TYPED(offset, size, uint64_t);
814 error = dmu_buf_hold(os, ZVOL_OBJ, offset, zgd, &db,
815 DMU_READ_NO_PREFETCH);
816 if (error == 0) {
817 zgd->zgd_db = db;
818 zgd->zgd_bp = &lr->lr_blkptr;
819
820 ASSERT(db != NULL);
821 ASSERT(db->db_offset == offset);
822 ASSERT(db->db_size == size);
823
824 error = dmu_sync(zio, lr->lr_common.lrc_txg,
825 zvol_get_done, zgd);
826
827 if (error == 0)
828 return (0);
829 }
830 }
831
832 zvol_get_done(zgd, error);
833
834 return (error);
835}
836
837/*
838 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
839 */
840static void
841zvol_insert(zvol_state_t *zv_insert)
842{
843 zvol_state_t *zv = NULL;
844
845 ASSERT(MUTEX_HELD(&zvol_state_lock));
846 ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
847 for (zv = list_head(&zvol_state_list); zv != NULL;
848 zv = list_next(&zvol_state_list, zv)) {
849 if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
850 break;
851 }
852
853 list_insert_before(&zvol_state_list, zv, zv_insert);
854}
855
856/*
 857 * Simply remove the zvol from the list of zvols.
858 */
859static void
860zvol_remove(zvol_state_t *zv_remove)
861{
862 ASSERT(MUTEX_HELD(&zvol_state_lock));
863 list_remove(&zvol_state_list, zv_remove);
864}
865
866static int
867zvol_first_open(zvol_state_t *zv)
868{
869 objset_t *os;
870 uint64_t volsize;
871 int error;
872 uint64_t ro;
873
874 /* lie and say we're read-only */
875 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
876 if (error)
877 return (-error);
878
879 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
880 if (error) {
881 dmu_objset_disown(os, zvol_tag);
882 return (-error);
883 }
884
885 zv->zv_objset = os;
886 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
887 if (error) {
888 dmu_objset_disown(os, zvol_tag);
889 return (-error);
890 }
891
892 set_capacity(zv->zv_disk, volsize >> 9);
893 zv->zv_volsize = volsize;
894 zv->zv_zilog = zil_open(os, zvol_get_data);
895
896 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
897 if (ro || dmu_objset_is_snapshot(os)) {
898 set_disk_ro(zv->zv_disk, 1);
899 zv->zv_flags |= ZVOL_RDONLY;
900 } else {
901 set_disk_ro(zv->zv_disk, 0);
902 zv->zv_flags &= ~ZVOL_RDONLY;
903 }
904
905 return (-error);
906}
907
908static void
909zvol_last_close(zvol_state_t *zv)
910{
911 zil_close(zv->zv_zilog);
912 zv->zv_zilog = NULL;
04434775 913
914 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
915 zv->zv_dbuf = NULL;
916
917 /*
918 * Evict cached data
919 */
920 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
921 !(zv->zv_flags & ZVOL_RDONLY))
922 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
923 (void) dmu_objset_evict_dbufs(zv->zv_objset);
924
925 dmu_objset_disown(zv->zv_objset, zvol_tag);
926 zv->zv_objset = NULL;
927}
928
929static int
930zvol_open(struct block_device *bdev, fmode_t flag)
931{
932 zvol_state_t *zv = bdev->bd_disk->private_data;
933 int error = 0, drop_mutex = 0;
934
935 /*
936 * If the caller is already holding the mutex do not take it
937 * again, this will happen as part of zvol_create_minor().
938 * Once add_disk() is called the device is live and the kernel
939 * will attempt to open it to read the partition information.
940 */
941 if (!mutex_owned(&zvol_state_lock)) {
942 mutex_enter(&zvol_state_lock);
943 drop_mutex = 1;
944 }
945
946 ASSERT3P(zv, !=, NULL);
947
948 if (zv->zv_open_count == 0) {
949 error = zvol_first_open(zv);
950 if (error)
951 goto out_mutex;
952 }
953
954 if ((flag & FMODE_WRITE) &&
955 (get_disk_ro(zv->zv_disk) || (zv->zv_flags & ZVOL_RDONLY))) {
956 error = -EROFS;
957 goto out_open_count;
958 }
959
960 zv->zv_open_count++;
961
962out_open_count:
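	/* If this was the first open and the writability check failed above,
	 * undo zvol_first_open(). */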
963 if (zv->zv_open_count == 0)
964 zvol_last_close(zv);
965
966out_mutex:
967 if (drop_mutex)
968 mutex_exit(&zvol_state_lock);
969
970 check_disk_change(bdev);
971
972 return (error);
973}
974
975static int
976zvol_release(struct gendisk *disk, fmode_t mode)
977{
978 zvol_state_t *zv = disk->private_data;
979 int drop_mutex = 0;
980
981 if (!mutex_owned(&zvol_state_lock)) {
982 mutex_enter(&zvol_state_lock);
983 drop_mutex = 1;
984 }
985
986 ASSERT3P(zv, !=, NULL);
987 ASSERT3U(zv->zv_open_count, >, 0);
988 zv->zv_open_count--;
989 if (zv->zv_open_count == 0)
990 zvol_last_close(zv);
991
992 if (drop_mutex)
993 mutex_exit(&zvol_state_lock);
994
995 return (0);
996}
997
998static int
999zvol_ioctl(struct block_device *bdev, fmode_t mode,
1000 unsigned int cmd, unsigned long arg)
1001{
1002 zvol_state_t *zv = bdev->bd_disk->private_data;
1003 int error = 0;
1004
1005 if (zv == NULL)
1006 return (-ENXIO);
1007
1008 switch (cmd) {
1009 case BLKFLSBUF:
1010 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1011 break;
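	/* BLKZNAME is a ZFS-specific ioctl returning the backing dataset name */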
1012 case BLKZNAME:
1013 error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
1014 break;
1015
1016 default:
1017 error = -ENOTTY;
1018 break;
1019
1020 }
1021
1022 return (error);
1023}
1024
1025#ifdef CONFIG_COMPAT
1026static int
1027zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
1028 unsigned cmd, unsigned long arg)
1029{
1030 return zvol_ioctl(bdev, mode, cmd, arg);
1031}
1032#else
1033#define zvol_compat_ioctl NULL
1034#endif
1035
1036static int zvol_media_changed(struct gendisk *disk)
1037{
1038 zvol_state_t *zv = disk->private_data;
1039
1040 return zv->zv_changed;
1041}
1042
1043static int zvol_revalidate_disk(struct gendisk *disk)
1044{
1045 zvol_state_t *zv = disk->private_data;
1046
1047 zv->zv_changed = 0;
1048 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1049
1050 return 0;
1051}
1052
1053/*
1054 * Provide a simple virtual geometry for legacy compatibility. For devices
1055 * smaller than 1 MiB a small head and sector count is used to allow very
 1056 * tiny devices. For devices over 1 MiB a standard head and sector count
 1057 * is used to keep the cylinder count reasonable.
1058 */
1059static int
1060zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1061{
1062 zvol_state_t *zv = bdev->bd_disk->private_data;
1063 sector_t sectors = get_capacity(zv->zv_disk);
1064
1065 if (sectors > 2048) {
1066 geo->heads = 16;
1067 geo->sectors = 63;
1068 } else {
1069 geo->heads = 2;
1070 geo->sectors = 4;
1071 }
1072
1073 geo->start = 0;
1074 geo->cylinders = sectors / (geo->heads * geo->sectors);
1075
1076 return 0;
1077}
1078
1079static struct kobject *
1080zvol_probe(dev_t dev, int *part, void *arg)
1081{
1082 zvol_state_t *zv;
1083 struct kobject *kobj;
1084
1085 mutex_enter(&zvol_state_lock);
1086 zv = zvol_find_by_dev(dev);
23a61ccc 1087 kobj = zv ? get_disk(zv->zv_disk) : NULL;
1088 mutex_exit(&zvol_state_lock);
1089
1090 return kobj;
1091}
1092
1093#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
1094static struct block_device_operations zvol_ops = {
1095 .open = zvol_open,
1096 .release = zvol_release,
1097 .ioctl = zvol_ioctl,
1098 .compat_ioctl = zvol_compat_ioctl,
1099 .media_changed = zvol_media_changed,
1100 .revalidate_disk = zvol_revalidate_disk,
1101 .getgeo = zvol_getgeo,
1102 .owner = THIS_MODULE,
1103};
1104
1105#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1106
1107static int
1108zvol_open_by_inode(struct inode *inode, struct file *file)
1109{
1110 return zvol_open(inode->i_bdev, file->f_mode);
1111}
1112
1113static int
1114zvol_release_by_inode(struct inode *inode, struct file *file)
1115{
1116 return zvol_release(inode->i_bdev->bd_disk, file->f_mode);
1117}
1118
1119static int
1120zvol_ioctl_by_inode(struct inode *inode, struct file *file,
1121 unsigned int cmd, unsigned long arg)
1122{
1123 if (file == NULL || inode == NULL)
1124 return -EINVAL;
1125 return zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg);
1126}
1127
1128# ifdef CONFIG_COMPAT
1129static long
1130zvol_compat_ioctl_by_inode(struct file *file,
1131 unsigned int cmd, unsigned long arg)
1132{
1133 if (file == NULL)
1134 return -EINVAL;
1135 return zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
1136 file->f_mode, cmd, arg);
1137}
1138# else
1139# define zvol_compat_ioctl_by_inode NULL
1140# endif
1141
1142static struct block_device_operations zvol_ops = {
1143 .open = zvol_open_by_inode,
1144 .release = zvol_release_by_inode,
1145 .ioctl = zvol_ioctl_by_inode,
1146 .compat_ioctl = zvol_compat_ioctl_by_inode,
1147 .media_changed = zvol_media_changed,
1148 .revalidate_disk = zvol_revalidate_disk,
1149 .getgeo = zvol_getgeo,
1150 .owner = THIS_MODULE,
1151};
1152#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */
1153
1154/*
 1155 * Allocate memory for a new zvol_state_t and set up the required
1156 * request queue and generic disk structures for the block device.
1157 */
1158static zvol_state_t *
1159zvol_alloc(dev_t dev, const char *name)
1160{
1161 zvol_state_t *zv;
7bd04f2d 1162 int error = 0;
1163
1164 zv = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
1165 if (zv == NULL)
1166 goto out;
1167
1168 zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
1169 if (zv->zv_queue == NULL)
1170 goto out_kmem;
1171
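	/*
	 * The DMU already merges and orders I/O for us, so prefer the
	 * lightweight "noop" elevator where the kernel lets us change it.
	 */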
1172#ifdef HAVE_ELEVATOR_CHANGE
1173 error = elevator_change(zv->zv_queue, "noop");
1174#endif /* HAVE_ELEVATOR_CHANGE */
1175 if (error) {
1176 printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
1177 "noop", name, error);
1178 goto out_queue;
1179 }
1180
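	/* Advertise flush/FUA support; zvol_write() maps these to zil_commit() */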
1181#ifdef HAVE_BLK_QUEUE_FLUSH
1182 blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
1183#else
1184 blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
1185#endif /* HAVE_BLK_QUEUE_FLUSH */
1186
1187 zv->zv_disk = alloc_disk(ZVOL_MINORS);
1188 if (zv->zv_disk == NULL)
1189 goto out_queue;
1190
1191 zv->zv_queue->queuedata = zv;
1192 zv->zv_dev = dev;
1193 zv->zv_open_count = 0;
4c0d8e50 1194 strlcpy(zv->zv_name, name, MAXNAMELEN);
1195
1196 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
1197 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
1198 sizeof (rl_t), offsetof(rl_t, r_node));
1199 zv->zv_znode.z_is_zvol = TRUE;
1200
1201 spin_lock_init(&zv->zv_lock);
1202 list_link_init(&zv->zv_next);
1203
1204 zv->zv_disk->major = zvol_major;
1205 zv->zv_disk->first_minor = (dev & MINORMASK);
1206 zv->zv_disk->fops = &zvol_ops;
1207 zv->zv_disk->private_data = zv;
1208 zv->zv_disk->queue = zv->zv_queue;
1209 snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
1210 ZVOL_DEV_NAME, (dev & MINORMASK));
1211
1212 return zv;
1213
1214out_queue:
1215 blk_cleanup_queue(zv->zv_queue);
1216out_kmem:
1217 kmem_free(zv, sizeof (zvol_state_t));
1218out:
1219 return NULL;
1220}
1221
1222/*
1223 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
1224 */
1225static void
1226zvol_free(zvol_state_t *zv)
1227{
1228 avl_destroy(&zv->zv_znode.z_range_avl);
1229 mutex_destroy(&zv->zv_znode.z_range_lock);
1230
1231 del_gendisk(zv->zv_disk);
1232 blk_cleanup_queue(zv->zv_queue);
1233 put_disk(zv->zv_disk);
1234
1235 kmem_free(zv, sizeof (zvol_state_t));
1236}
1237
1238static int
1239__zvol_create_minor(const char *name)
1240{
1241 zvol_state_t *zv;
1242 objset_t *os;
1243 dmu_object_info_t *doi;
1244 uint64_t volsize;
1245 unsigned minor = 0;
1246 int error = 0;
1247
1248 ASSERT(MUTEX_HELD(&zvol_state_lock));
1249
1250 zv = zvol_find_by_name(name);
1251 if (zv) {
1252 error = EEXIST;
1253 goto out;
1254 }
1255
1256 doi = kmem_alloc(sizeof(dmu_object_info_t), KM_SLEEP);
1257
1258 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
1259 if (error)
1260 goto out_doi;
1261
1262 error = dmu_object_info(os, ZVOL_OBJ, doi);
1263 if (error)
1264 goto out_dmu_objset_disown;
1265
1266 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
1267 if (error)
1268 goto out_dmu_objset_disown;
1269
1270 error = zvol_find_minor(&minor);
1271 if (error)
1272 goto out_dmu_objset_disown;
1273
1274 zv = zvol_alloc(MKDEV(zvol_major, minor), name);
1275 if (zv == NULL) {
1276 error = EAGAIN;
1277 goto out_dmu_objset_disown;
1278 }
1279
1280 if (dmu_objset_is_snapshot(os))
1281 zv->zv_flags |= ZVOL_RDONLY;
1282
1283 zv->zv_volblocksize = doi->doi_data_block_size;
1284 zv->zv_volsize = volsize;
1285 zv->zv_objset = os;
1286
1287 set_capacity(zv->zv_disk, zv->zv_volsize >> 9);
1288
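	/*
	 * Publish queue limits based on the volume block size so the
	 * kernel submits well-aligned, appropriately sized requests.
	 */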
1289 blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
1290 blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
1291 blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
1292 blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
1293 blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
30930fba 1294#ifdef HAVE_BLK_QUEUE_DISCARD
1295 blk_queue_max_discard_sectors(zv->zv_queue,
1296 (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
ee5fd0bb 1297 blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
1298 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
1299#endif
1300#ifdef HAVE_BLK_QUEUE_NONROT
1301 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
1302#endif
1303
1304 if (zil_replay_disable)
1305 zil_destroy(dmu_objset_zil(os), B_FALSE);
1306 else
1307 zil_replay(os, zv, zvol_replay_vector);
1308
1309out_dmu_objset_disown:
1310 dmu_objset_disown(os, zvol_tag);
1311 zv->zv_objset = NULL;
1312out_doi:
1313 kmem_free(doi, sizeof(dmu_object_info_t));
1314out:
1315
1316 if (error == 0) {
1317 zvol_insert(zv);
1318 add_disk(zv->zv_disk);
1319 }
1320
1321 return (error);
1322}
1323
1324/*
 1325 * Create a block device minor node and set up the linkage between it
 1326 * and the specified volume. Once this function returns, the block
1327 * device is live and ready for use.
1328 */
1329int
1330zvol_create_minor(const char *name)
1331{
1332 int error;
1333
1334 mutex_enter(&zvol_state_lock);
1335 error = __zvol_create_minor(name);
1336 mutex_exit(&zvol_state_lock);
1337
1338 return (error);
1339}
1340
1341static int
1342__zvol_remove_minor(const char *name)
1343{
1344 zvol_state_t *zv;
1345
1346 ASSERT(MUTEX_HELD(&zvol_state_lock));
1347
1348 zv = zvol_find_by_name(name);
1349 if (zv == NULL)
1350 return (ENXIO);
1351
1352 if (zv->zv_open_count > 0)
1353 return (EBUSY);
1354
1355 zvol_remove(zv);
1356 zvol_free(zv);
1357
1358 return (0);
1359}
1360
1361/*
1362 * Remove a block device minor node for the specified volume.
1363 */
1364int
1365zvol_remove_minor(const char *name)
1366{
1367 int error;
1368
1369 mutex_enter(&zvol_state_lock);
1370 error = __zvol_remove_minor(name);
1371 mutex_exit(&zvol_state_lock);
1372
1373 return (error);
1374}
1375
1376static int
1377zvol_create_minors_cb(spa_t *spa, uint64_t dsobj,
1378 const char *dsname, void *arg)
1379{
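	/* A name without '/' is the pool's root dataset, which is never a zvol */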
1380 if (strchr(dsname, '/') == NULL)
1381 return 0;
1382
1383 (void) __zvol_create_minor(dsname);
1384 return (0);
1385}
1386
1387/*
 1388 * Create minors for the specified pool; if pool is NULL, create minors
 1389 * for all available pools.
1390 */
1391int
1392zvol_create_minors(const char *pool)
1393{
1394 spa_t *spa = NULL;
1395 int error = 0;
1396
1397 if (zvol_inhibit_dev)
1398 return (0);
1399
1400 mutex_enter(&zvol_state_lock);
1401 if (pool) {
1402 error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
1403 NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1404 } else {
1405 mutex_enter(&spa_namespace_lock);
1406 while ((spa = spa_next(spa)) != NULL) {
1407 error = dmu_objset_find_spa(NULL,
1408 spa_name(spa), zvol_create_minors_cb, NULL,
1409 DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
1410 if (error)
1411 break;
1412 }
1413 mutex_exit(&spa_namespace_lock);
1414 }
1415 mutex_exit(&zvol_state_lock);
1416
1417 return error;
1418}
1419
1420/*
 1421 * Remove minors for the specified pool; if pool is NULL, remove all minors.
1422 */
1423void
1424zvol_remove_minors(const char *pool)
1425{
1426 zvol_state_t *zv, *zv_next;
1427 char *str;
1428
1429 if (zvol_inhibit_dev)
1430 return;
1431
4c0d8e50 1432 str = kmem_zalloc(MAXNAMELEN, KM_SLEEP);
1433 if (pool) {
1434 (void) strncpy(str, pool, strlen(pool));
1435 (void) strcat(str, "/");
1436 }
1437
1438 mutex_enter(&zvol_state_lock);
1439 for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
1440 zv_next = list_next(&zvol_state_list, zv);
1441
1442 if (pool == NULL || !strncmp(str, zv->zv_name, strlen(str))) {
1443 zvol_remove(zv);
1444 zvol_free(zv);
1445 }
1446 }
1447 mutex_exit(&zvol_state_lock);
4c0d8e50 1448 kmem_free(str, MAXNAMELEN);
1449}
1450
1451int
1452zvol_init(void)
1453{
1454 int error;
1455
60101509 1456 zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
71011408 1457 zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
1458 if (zvol_taskq == NULL) {
1459 printk(KERN_INFO "ZFS: taskq_create() failed\n");
1460 return (-ENOMEM);
1461 }
1462
1463 error = register_blkdev(zvol_major, ZVOL_DRIVER);
1464 if (error) {
1465 printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
1466 taskq_destroy(zvol_taskq);
1467 return (error);
1468 }
1469
1470 blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
1471 THIS_MODULE, zvol_probe, NULL, NULL);
1472
1473 mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);
1474 list_create(&zvol_state_list, sizeof (zvol_state_t),
1475 offsetof(zvol_state_t, zv_next));
1476
1477 (void) zvol_create_minors(NULL);
1478
1479 return (0);
1480}
1481
1482void
1483zvol_fini(void)
1484{
1485 zvol_remove_minors(NULL);
1486 blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
1487 unregister_blkdev(zvol_major, ZVOL_DRIVER);
1488 taskq_destroy(zvol_taskq);
1489 mutex_destroy(&zvol_state_lock);
1490 list_destroy(&zvol_state_list);
1491}
1492
1493module_param(zvol_inhibit_dev, uint, 0644);
1494MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");
1495
30a9524e 1496module_param(zvol_major, uint, 0444);
1497MODULE_PARM_DESC(zvol_major, "Major number for zvol device");
1498
30a9524e 1499module_param(zvol_threads, uint, 0444);
60101509 1500MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");
1501
1502module_param(zvol_max_discard_blocks, ulong, 0444);
1503MODULE_PARM_DESC(zvol_max_discard_blocks, "Max number of blocks to discard at once");