/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 *
 * ZFS volume emulation driver.
 *
 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
 * Volumes are accessed through the symbolic links named:
 *
 * /dev/<pool_name>/<dataset_name>
 *
 * Volumes are persistent through reboot and module load. No user command
 * needs to be run before opening and using a device.
 */

#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/zap.h>
#include <sys/zil_impl.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include <linux/blkdev_compat.h>

unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_major = ZVOL_MAJOR;
unsigned int zvol_threads = 32;
unsigned long zvol_max_discard_blocks = 16384;

static taskq_t *zvol_taskq;
static kmutex_t zvol_state_lock;
static list_t zvol_state_list;
static char *zvol_tag = "zvol_tag";

/*
 * The in-core state of each volume.
 */
typedef struct zvol_state {
	char			zv_name[MAXNAMELEN];	/* name */
	uint64_t		zv_volsize;	/* advertised space */
	uint64_t		zv_volblocksize; /* volume block size */
	objset_t		*zv_objset;	/* objset handle */
	uint32_t		zv_flags;	/* ZVOL_* flags */
	uint32_t		zv_open_count;	/* open counts */
	uint32_t		zv_changed;	/* disk changed */
	zilog_t			*zv_zilog;	/* ZIL handle */
	znode_t			zv_znode;	/* for range locking */
	dmu_buf_t		*zv_dbuf;	/* bonus handle */
	dev_t			zv_dev;		/* device id */
	struct gendisk		*zv_disk;	/* generic disk */
	struct request_queue	*zv_queue;	/* request queue */
	spinlock_t		zv_lock;	/* request queue lock */
	list_node_t		zv_next;	/* next zvol_state_t linkage */
} zvol_state_t;

#define	ZVOL_RDONLY	0x1

/*
 * Find the next available range of ZVOL_MINORS minor numbers. The
 * zvol_state_list is kept in ascending minor order so we simply need
 * to scan the list for the first gap in the sequence. This allows us
 * to recycle minor numbers as devices are created and removed.
 */
static int
zvol_find_minor(unsigned *minor)
{
	zvol_state_t *zv;

	*minor = 0;
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv), *minor += ZVOL_MINORS) {
		if (MINOR(zv->zv_dev) != MINOR(*minor))
			break;
	}

	/* All minors are in use */
	if (*minor >= (1 << MINORBITS))
		return ENXIO;

	return 0;
}
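
/*
 * Worked example (illustrative only, assuming ZVOL_MINORS == 16): with
 * devices currently at minors 0 and 32 the scan expects 0, 16, 32, ...,
 * stops at the first mismatch, and returns the recycled gap at minor 16.
 */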

/*
 * Find a zvol_state_t given the full major+minor dev_t.
 */
static zvol_state_t *
zvol_find_by_dev(dev_t dev)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (zv->zv_dev == dev)
			return zv;
	}

	return NULL;
}

/*
 * Find a zvol_state_t given the name provided at zvol_alloc() time.
 */
static zvol_state_t *
zvol_find_by_name(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (!strncmp(zv->zv_name, name, MAXNAMELEN))
			return zv;
	}

	return NULL;
}

/*
 * Given a path, return TRUE if path is a ZVOL.
 */
boolean_t
zvol_is_zvol(const char *device)
{
	struct block_device *bdev;
	unsigned int major;

	bdev = lookup_bdev(device);
	if (IS_ERR(bdev))
		return (B_FALSE);

	major = MAJOR(bdev->bd_dev);
	bdput(bdev);

	if (major == zvol_major)
		return (B_TRUE);

	return (B_FALSE);
}
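
/*
 * Example (assumed device names): zvol_is_zvol("/dev/zd0") returns B_TRUE
 * when that node's major number matches zvol_major; a path which does not
 * resolve to a block device at all yields B_FALSE.
 */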

/*
 * ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
 */
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
	zfs_creat_t *zct = arg;
	nvlist_t *nvprops = zct->zct_props;
	int error;
	uint64_t volblocksize, volsize;

	VERIFY(nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
	if (nvlist_lookup_uint64(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
		volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);

	/*
	 * These properties must be removed from the list so the generic
	 * property setting step won't apply to them.
	 */
	VERIFY(nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
	(void) nvlist_remove_all(nvprops,
	    zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));

	error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
	    DMU_OT_NONE, 0, tx);
	ASSERT(error == 0);

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
	ASSERT(error == 0);
}

/*
 * ZFS_IOC_OBJSET_STATS entry point.
 */
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
	int error;
	dmu_object_info_t *doi;
	uint64_t val;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
	if (error)
		return (error);

	dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
	error = dmu_object_info(os, ZVOL_OBJ, doi);

	if (error == 0) {
		dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
		    doi->doi_data_block_size);
	}

	kmem_free(doi, sizeof (dmu_object_info_t));

	return (error);
}

/*
 * Sanity check volume size.
 */
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
	if (volsize == 0)
		return (SET_ERROR(EINVAL));

	if (volsize % blocksize != 0)
		return (SET_ERROR(EINVAL));

#ifdef _ILP32
	if (volsize - 1 > MAXOFFSET_T)
		return (SET_ERROR(EOVERFLOW));
#endif
	return (0);
}
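
/*
 * Worked example (illustrative only): volsize = 1073741824 (1 GiB) with
 * blocksize = 8192 passes since 1073741824 % 8192 == 0, while
 * volsize = 1000000 with the same blocksize fails with EINVAL because it
 * is not a multiple of the block size.
 */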

/*
 * Ensure the zap is flushed then inform the VFS of the capacity change.
 */
static int
zvol_update_volsize(zvol_state_t *zv, uint64_t volsize, objset_t *os)
{
	struct block_device *bdev;
	dmu_tx_t *tx;
	int error;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	tx = dmu_tx_create(os);
	dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		return (error);
	}

	error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
	    &volsize, tx);
	dmu_tx_commit(tx);

	if (error)
		return (error);

	error = dmu_free_long_range(os,
	    ZVOL_OBJ, volsize, DMU_OBJECT_END);
	if (error)
		return (error);

	bdev = bdget_disk(zv->zv_disk, 0);
	if (!bdev)
		return (SET_ERROR(EIO));
/*
 * 2.6.28 API change
 * Added check_disk_size_change() helper function.
 */
#ifdef HAVE_CHECK_DISK_SIZE_CHANGE
	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	check_disk_size_change(zv->zv_disk, bdev);
#else
	zv->zv_volsize = volsize;
	zv->zv_changed = 1;
	(void) check_disk_change(bdev);
#endif /* HAVE_CHECK_DISK_SIZE_CHANGE */

	bdput(bdev);

	return (0);
}

/*
 * ZFS_PROP_VOLSIZE set entry point.
 */
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
	zvol_state_t *zv;
	dmu_object_info_t *doi;
	objset_t *os = NULL;
	uint64_t readonly;
	int error;

	error = dsl_prop_get_integer(name,
	    zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
	if (error != 0)
		return (error);
	if (readonly)
		return (SET_ERROR(EROFS));

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);

	error = dmu_objset_hold(name, FTAG, &os);
	if (error)
		goto out_doi;

	if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) != 0 ||
	    (error = zvol_check_volsize(volsize,
	    doi->doi_data_block_size)) != 0)
		goto out_doi;

	VERIFY(dsl_prop_get_integer(name, "readonly", &readonly, NULL) == 0);
	if (readonly) {
		error = SET_ERROR(EROFS);
		goto out_doi;
	}

	if (zv->zv_flags & ZVOL_RDONLY) {
		error = SET_ERROR(EROFS);
		goto out_doi;
	}

	error = zvol_update_volsize(zv, volsize, os);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:
	if (os)
		dmu_objset_rele(os, FTAG);

	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Sanity check volume block size.
 */
int
zvol_check_volblocksize(uint64_t volblocksize)
{
	if (volblocksize < SPA_MINBLOCKSIZE ||
	    volblocksize > SPA_MAXBLOCKSIZE ||
	    !ISP2(volblocksize))
		return (SET_ERROR(EDOM));

	return (0);
}
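
/*
 * Worked example (illustrative only): 8192 is accepted, being a power of
 * two within [SPA_MINBLOCKSIZE, SPA_MAXBLOCKSIZE]; 12288 is rejected with
 * EDOM because !ISP2(12288).
 */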

/*
 * ZFS_PROP_VOLBLOCKSIZE set entry point.
 */
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
	zvol_state_t *zv;
	dmu_tx_t *tx;
	int error;

	mutex_enter(&zvol_state_lock);

	zv = zvol_find_by_name(name);
	if (zv == NULL) {
		error = SET_ERROR(ENXIO);
		goto out;
	}

	if (zv->zv_flags & ZVOL_RDONLY) {
		error = SET_ERROR(EROFS);
		goto out;
	}

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_bonus(tx, ZVOL_OBJ);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
		    volblocksize, 0, tx);
		if (error == ENOTSUP)
			error = SET_ERROR(EBUSY);
		dmu_tx_commit(tx);
		if (error == 0)
			zv->zv_volblocksize = volblocksize;
	}
out:
	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Replay a TX_WRITE ZIL transaction that didn't get committed
 * after a system failure.
 */
static int
zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
{
	objset_t *os = zv->zv_objset;
	char *data = (char *)(lr + 1);	/* data follows lr_write_t */
	uint64_t off = lr->lr_offset;
	uint64_t len = lr->lr_length;
	dmu_tx_t *tx;
	int error;

	if (byteswap)
		byteswap_uint64_array(lr, sizeof (*lr));

	tx = dmu_tx_create(os);
	dmu_tx_hold_write(tx, ZVOL_OBJ, off, len);
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
	} else {
		dmu_write(os, ZVOL_OBJ, off, len, data, tx);
		dmu_tx_commit(tx);
	}

	return (error);
}

static int
zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
{
	return (SET_ERROR(ENOTSUP));
}

/*
 * Callback vectors for replaying records.
 * Only TX_WRITE is needed for zvol.
 */
zil_replay_func_t zvol_replay_vector[TX_MAX_TYPE] = {
	(zil_replay_func_t)zvol_replay_err,	/* no such transaction type */
	(zil_replay_func_t)zvol_replay_err,	/* TX_CREATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_MKXATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SYMLINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_REMOVE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RMDIR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_LINK */
	(zil_replay_func_t)zvol_replay_err,	/* TX_RENAME */
	(zil_replay_func_t)zvol_replay_write,	/* TX_WRITE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_TRUNCATE */
	(zil_replay_func_t)zvol_replay_err,	/* TX_SETATTR */
	(zil_replay_func_t)zvol_replay_err,	/* TX_ACL */
};

/*
 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
 *
 * We store data in the log buffers if it's small enough.
 * Otherwise we will later flush the data out via dmu_sync().
 */
ssize_t zvol_immediate_write_sz = 32768;

static void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx,
    uint64_t offset, uint64_t size, int sync)
{
	uint32_t blocksize = zv->zv_volblocksize;
	zilog_t *zilog = zv->zv_zilog;
	boolean_t slogging;
	ssize_t immediate_write_sz;

	if (zil_replaying(zilog, tx))
		return;

	immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
	    ? 0 : zvol_immediate_write_sz;
	slogging = spa_has_slogs(zilog->zl_spa) &&
	    (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);

	while (size) {
		itx_t *itx;
		lr_write_t *lr;
		ssize_t len;
		itx_wr_state_t write_state;

		/*
		 * Unlike zfs_log_write() we can be called with
		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
		 */
		if (blocksize > immediate_write_sz && !slogging &&
		    size >= blocksize && offset % blocksize == 0) {
			write_state = WR_INDIRECT; /* uses dmu_sync */
			len = blocksize;
		} else if (sync) {
			write_state = WR_COPIED;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		} else {
			write_state = WR_NEED_COPY;
			len = MIN(ZIL_MAX_LOG_DATA, size);
		}

		itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
		    (write_state == WR_COPIED ? len : 0));
		lr = (lr_write_t *)&itx->itx_lr;
		if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
		    ZVOL_OBJ, offset, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
			zil_itx_destroy(itx);
			itx = zil_itx_create(TX_WRITE, sizeof (*lr));
			lr = (lr_write_t *)&itx->itx_lr;
			write_state = WR_NEED_COPY;
		}

		itx->itx_wr_state = write_state;
		if (write_state == WR_NEED_COPY)
			itx->itx_sod += len;
		lr->lr_foid = ZVOL_OBJ;
		lr->lr_offset = offset;
		lr->lr_length = len;
		lr->lr_blkoff = 0;
		BP_ZERO(&lr->lr_blkptr);

		itx->itx_private = zv;
		itx->itx_sync = sync;

		(void) zil_itx_assign(zilog, itx, tx);

		offset += len;
		size -= len;
	}
}
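
/*
 * Worked example (illustrative only): with the default logbias=latency,
 * the default zvol_immediate_write_sz of 32768, and no separate log
 * device, an aligned full-block synchronous write to a 128K-block volume
 * takes WR_INDIRECT (the block is synced via dmu_sync() and only a block
 * pointer is logged), while the same write to an 8K-block volume takes
 * WR_COPIED and the data travels in the log record itself.
 */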

/*
 * Common write path running under the zvol taskq context. This function
 * is responsible for copying the request structure data into the DMU and
 * signaling the request queue with the result of the copy.
 */
static void
zvol_write(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error = 0;
	dmu_tx_t *tx;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock. KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (req->cmd_flags & VDEV_REQ_FLUSH)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	/*
	 * Some requests are just for flush and nothing else.
	 */
	if (size == 0) {
		blk_end_request(req, 0, size);
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_WRITER);

	tx = dmu_tx_create(zv->zv_objset);
	dmu_tx_hold_write(tx, ZVOL_OBJ, offset, size);

	/* This will only fail for ENOSPC */
	error = dmu_tx_assign(tx, TXG_WAIT);
	if (error) {
		dmu_tx_abort(tx);
		zfs_range_unlock(rl);
		blk_end_request(req, -error, size);
		goto out;
	}

	error = dmu_write_req(zv->zv_objset, ZVOL_OBJ, req, tx);
	if (error == 0)
		zvol_log_write(zv, tx, offset, size,
		    req->cmd_flags & VDEV_REQ_FUA);

	dmu_tx_commit(tx);
	zfs_range_unlock(rl);

	if ((req->cmd_flags & VDEV_REQ_FUA) ||
	    zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)
		zil_commit(zv->zv_zilog, ZVOL_OBJ);

	blk_end_request(req, -error, size);
out:
	current->flags &= ~PF_NOFS;
}

#ifdef HAVE_BLK_QUEUE_DISCARD
static void
zvol_discard(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t start = blk_rq_pos(req) << 9;
	uint64_t end = start + blk_rq_bytes(req);
	int error;
	rl_t *rl;

	/*
	 * Annotate this call path with a flag that indicates that it is
	 * unsafe to use KM_SLEEP during memory allocations due to the
	 * potential for a deadlock. KM_PUSHPAGE should be used instead.
	 */
	ASSERT(!(current->flags & PF_NOFS));
	current->flags |= PF_NOFS;

	if (end > zv->zv_volsize) {
		blk_end_request(req, -EIO, blk_rq_bytes(req));
		goto out;
	}

	/*
	 * Align the request to volume block boundaries. If we don't,
	 * then this will force dnode_free_range() to zero out the
	 * unaligned parts, which is slow (read-modify-write) and
	 * useless since we are not freeing any space by doing so.
	 */
	start = P2ROUNDUP(start, zv->zv_volblocksize);
	end = P2ALIGN(end, zv->zv_volblocksize);

	if (start >= end) {
		blk_end_request(req, 0, blk_rq_bytes(req));
		goto out;
	}

	rl = zfs_range_lock(&zv->zv_znode, start, end - start, RL_WRITER);

	error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, start,
	    end - start);

	/*
	 * TODO: maybe we should add the operation to the log.
	 */

	zfs_range_unlock(rl);

	blk_end_request(req, -error, blk_rq_bytes(req));
out:
	current->flags &= ~PF_NOFS;
}
#endif /* HAVE_BLK_QUEUE_DISCARD */
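
/*
 * Worked example (illustrative only): a discard of bytes [4096, 20480)
 * on a volume with an 8K block size is trimmed to [8192, 16384), since
 * P2ROUNDUP(4096, 8192) == 8192 and P2ALIGN(20480, 8192) == 16384; only
 * the one fully-covered block is freed.
 */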

/*
 * Common read path running under the zvol taskq context. This function
 * is responsible for copying the requested data out of the DMU and into
 * a Linux request structure. It then must signal the request queue with
 * an error code describing the result of the copy.
 */
static void
zvol_read(void *arg)
{
	struct request *req = (struct request *)arg;
	struct request_queue *q = req->q;
	zvol_state_t *zv = q->queuedata;
	uint64_t offset = blk_rq_pos(req) << 9;
	uint64_t size = blk_rq_bytes(req);
	int error;
	rl_t *rl;

	if (size == 0) {
		blk_end_request(req, 0, size);
		return;
	}

	rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	error = dmu_read_req(zv->zv_objset, ZVOL_OBJ, req);

	zfs_range_unlock(rl);

	/* convert checksum errors into IO errors */
	if (error == ECKSUM)
		error = SET_ERROR(EIO);

	blk_end_request(req, -error, size);
}

/*
 * Request will be added back to the request queue and retried if
 * it cannot be immediately dispatched to the taskq for handling.
 */
static inline void
zvol_dispatch(task_func_t func, struct request *req)
{
	if (!taskq_dispatch(zvol_taskq, func, (void *)req, TQ_NOSLEEP))
		blk_requeue_request(req->q, req);
}

/*
 * Common request path. Rather than registering a custom make_request()
 * function we use the generic Linux version. This is done because it
 * allows us to easily merge read requests which would otherwise be
 * performed synchronously by the DMU. This is less critical in the write
 * case where the DMU will perform the correct merging within a transaction
 * group. Using the generic make_request() also lets us leverage the fact
 * that the elevator will ensure correct ordering with regard to barrier
 * IOs. On the downside it means that in the write case we end up doing
 * request merging twice: once in the elevator and once in the DMU.
 *
 * The request handler is called under a spin lock so all the real work
 * is handed off to be done in the context of the zvol taskq. This function
 * simply performs basic request sanity checking and hands off the request.
 */
static void
zvol_request(struct request_queue *q)
{
	zvol_state_t *zv = q->queuedata;
	struct request *req;
	unsigned int size;

	while ((req = blk_fetch_request(q)) != NULL) {
		size = blk_rq_bytes(req);

		if (size != 0 && blk_rq_pos(req) + blk_rq_sectors(req) >
		    get_capacity(zv->zv_disk)) {
			printk(KERN_INFO
			    "%s: bad access: block=%llu, count=%lu\n",
			    req->rq_disk->disk_name,
			    (long long unsigned)blk_rq_pos(req),
			    (long unsigned)blk_rq_sectors(req));
			__blk_end_request(req, -EIO, size);
			continue;
		}

		if (!blk_fs_request(req)) {
			printk(KERN_INFO "%s: non-fs cmd\n",
			    req->rq_disk->disk_name);
			__blk_end_request(req, -EIO, size);
			continue;
		}

		switch (rq_data_dir(req)) {
		case READ:
			zvol_dispatch(zvol_read, req);
			break;
		case WRITE:
			if (unlikely(zv->zv_flags & ZVOL_RDONLY)) {
				__blk_end_request(req, -EROFS, size);
				break;
			}

#ifdef HAVE_BLK_QUEUE_DISCARD
			if (req->cmd_flags & VDEV_REQ_DISCARD) {
				zvol_dispatch(zvol_discard, req);
				break;
			}
#endif /* HAVE_BLK_QUEUE_DISCARD */

			zvol_dispatch(zvol_write, req);
			break;
		default:
			printk(KERN_INFO "%s: unknown cmd: %d\n",
			    req->rq_disk->disk_name, (int)rq_data_dir(req));
			__blk_end_request(req, -EIO, size);
			break;
		}
	}
}

static void
zvol_get_done(zgd_t *zgd, int error)
{
	if (zgd->zgd_db)
		dmu_buf_rele(zgd->zgd_db, zgd);

	zfs_range_unlock(zgd->zgd_rl);

	if (error == 0 && zgd->zgd_bp)
		zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);

	kmem_free(zgd, sizeof (zgd_t));
}

/*
 * Get data to generate a TX_WRITE intent log record.
 */
static int
zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
{
	zvol_state_t *zv = arg;
	objset_t *os = zv->zv_objset;
	uint64_t object = ZVOL_OBJ;
	uint64_t offset = lr->lr_offset;
	uint64_t size = lr->lr_length;
	blkptr_t *bp = &lr->lr_blkptr;
	dmu_buf_t *db;
	zgd_t *zgd;
	int error;

	ASSERT(zio != NULL);
	ASSERT(size != 0);

	zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_PUSHPAGE);
	zgd->zgd_zilog = zv->zv_zilog;
	zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);

	/*
	 * Write records come in two flavors: immediate and indirect.
	 * For small writes it's cheaper to store the data with the
	 * log record (immediate); for large writes it's cheaper to
	 * sync the data and get a pointer to it (indirect) so that
	 * we don't have to write the data twice.
	 */
	if (buf != NULL) { /* immediate write */
		error = dmu_read(os, object, offset, size, buf,
		    DMU_READ_NO_PREFETCH);
	} else {
		size = zv->zv_volblocksize;
		offset = P2ALIGN_TYPED(offset, size, uint64_t);
		error = dmu_buf_hold(os, object, offset, zgd, &db,
		    DMU_READ_NO_PREFETCH);
		if (error == 0) {
			blkptr_t *obp = dmu_buf_get_blkptr(db);
			if (obp) {
				ASSERT(BP_IS_HOLE(bp));
				*bp = *obp;
			}

			zgd->zgd_db = db;
			zgd->zgd_bp = &lr->lr_blkptr;

			ASSERT(db != NULL);
			ASSERT(db->db_offset == offset);
			ASSERT(db->db_size == size);

			error = dmu_sync(zio, lr->lr_common.lrc_txg,
			    zvol_get_done, zgd);

			if (error == 0)
				return (0);
		}
	}

	zvol_get_done(zgd, error);

	return (error);
}

/*
 * The zvol_state_t's are inserted in increasing MINOR(dev_t) order.
 */
static void
zvol_insert(zvol_state_t *zv_insert)
{
	zvol_state_t *zv = NULL;

	ASSERT(MUTEX_HELD(&zvol_state_lock));
	ASSERT3U(MINOR(zv_insert->zv_dev) & ZVOL_MINOR_MASK, ==, 0);
	for (zv = list_head(&zvol_state_list); zv != NULL;
	    zv = list_next(&zvol_state_list, zv)) {
		if (MINOR(zv->zv_dev) > MINOR(zv_insert->zv_dev))
			break;
	}

	list_insert_before(&zvol_state_list, zv, zv_insert);
}

/*
 * Simply remove the zvol from the list of zvols.
 */
static void
zvol_remove(zvol_state_t *zv_remove)
{
	ASSERT(MUTEX_HELD(&zvol_state_lock));
	list_remove(&zvol_state_list, zv_remove);
}

static int
zvol_first_open(zvol_state_t *zv)
{
	objset_t *os;
	uint64_t volsize;
	int locked = 0;
	int error;
	uint64_t ro;

	/*
	 * In all other cases the spa_namespace_lock is taken before the
	 * bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
	 * function calls fops->open() with the bdev->bd_mutex lock held.
	 *
	 * To avoid a potential lock inversion deadlock we preemptively
	 * try to take the spa_namespace_lock(). Normally it will not
	 * be contended and this is safe because spa_open_common() handles
	 * the case where the caller already holds the spa_namespace_lock.
	 *
	 * When it is contended we risk a lock inversion if we were to
	 * block waiting for the lock. Luckily, the __blkdev_get()
	 * function allows us to return -ERESTARTSYS which will result in
	 * bdev->bd_mutex being dropped, reacquired, and fops->open() being
	 * called again. This process can be repeated safely until both
	 * locks are acquired.
	 */
	if (!mutex_owned(&spa_namespace_lock)) {
		locked = mutex_tryenter(&spa_namespace_lock);
		if (!locked)
			return (-SET_ERROR(ERESTARTSYS));
	}

	/* lie and say we're read-only */
	error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, 1, zvol_tag, &os);
	if (error)
		goto out_mutex;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	zv->zv_objset = os;
	error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
	if (error) {
		dmu_objset_disown(os, zvol_tag);
		goto out_mutex;
	}

	set_capacity(zv->zv_disk, volsize >> 9);
	zv->zv_volsize = volsize;
	zv->zv_zilog = zil_open(os, zvol_get_data);

	VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL) == 0);
	if (ro || dmu_objset_is_snapshot(os) ||
	    !spa_writeable(dmu_objset_spa(os))) {
		set_disk_ro(zv->zv_disk, 1);
		zv->zv_flags |= ZVOL_RDONLY;
	} else {
		set_disk_ro(zv->zv_disk, 0);
		zv->zv_flags &= ~ZVOL_RDONLY;
	}

out_mutex:
	if (locked)
		mutex_exit(&spa_namespace_lock);

	return (-error);
}

static void
zvol_last_close(zvol_state_t *zv)
{
	zil_close(zv->zv_zilog);
	zv->zv_zilog = NULL;

	dmu_buf_rele(zv->zv_dbuf, zvol_tag);
	zv->zv_dbuf = NULL;

	/*
	 * Evict cached data
	 */
	if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
	    !(zv->zv_flags & ZVOL_RDONLY))
		txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
	(void) dmu_objset_evict_dbufs(zv->zv_objset);

	dmu_objset_disown(zv->zv_objset, zvol_tag);
	zv->zv_objset = NULL;
}

static int
zvol_open(struct block_device *bdev, fmode_t flag)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0, drop_mutex = 0;

	/*
	 * If the caller is already holding the mutex do not take it
	 * again, this will happen as part of zvol_create_minor().
	 * Once add_disk() is called the device is live and the kernel
	 * will attempt to open it to read the partition information.
	 */
	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);

	if (zv->zv_open_count == 0) {
		error = zvol_first_open(zv);
		if (error)
			goto out_mutex;
	}

	if ((flag & FMODE_WRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
		error = -EROFS;
		goto out_open_count;
	}

	zv->zv_open_count++;

out_open_count:
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

out_mutex:
	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

	check_disk_change(bdev);

	return (error);
}

#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
#else
static int
#endif
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv = disk->private_data;
	int drop_mutex = 0;

	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	ASSERT3P(zv, !=, NULL);
	ASSERT3U(zv->zv_open_count, >, 0);
	zv->zv_open_count--;
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
	return (0);
#endif
}

static int
zvol_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned int cmd, unsigned long arg)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	int error = 0;

	if (zv == NULL)
		return (-SET_ERROR(ENXIO));

	switch (cmd) {
	case BLKFLSBUF:
		zil_commit(zv->zv_zilog, ZVOL_OBJ);
		break;
	case BLKZNAME:
		error = copy_to_user((void *)arg, zv->zv_name, MAXNAMELEN);
		break;

	default:
		error = -ENOTTY;
		break;
	}

	return (error);
}

#ifdef CONFIG_COMPAT
static int
zvol_compat_ioctl(struct block_device *bdev, fmode_t mode,
    unsigned cmd, unsigned long arg)
{
	return zvol_ioctl(bdev, mode, cmd, arg);
}
#else
#define	zvol_compat_ioctl	NULL
#endif

static int zvol_media_changed(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	return zv->zv_changed;
}

static int zvol_revalidate_disk(struct gendisk *disk)
{
	zvol_state_t *zv = disk->private_data;

	zv->zv_changed = 0;
	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	return 0;
}

/*
 * Provide a simple virtual geometry for legacy compatibility. For devices
 * smaller than 1 MiB a small head and sector count is used to allow very
 * tiny devices. For devices over 1 MiB a standard head and sector count
 * is used to keep the cylinders count reasonable.
 */
static int
zvol_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	zvol_state_t *zv = bdev->bd_disk->private_data;
	sector_t sectors = get_capacity(zv->zv_disk);

	if (sectors > 2048) {
		geo->heads = 16;
		geo->sectors = 63;
	} else {
		geo->heads = 2;
		geo->sectors = 4;
	}

	geo->start = 0;
	geo->cylinders = sectors / (geo->heads * geo->sectors);

	return 0;
}
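
/*
 * Worked example (illustrative only): a 1 GiB volume has 2097152 512-byte
 * sectors, so the reported geometry is heads = 16, sectors = 63, and
 * cylinders = 2097152 / (16 * 63) = 2080.
 */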

static struct kobject *
zvol_probe(dev_t dev, int *part, void *arg)
{
	zvol_state_t *zv;
	struct kobject *kobj;

	mutex_enter(&zvol_state_lock);
	zv = zvol_find_by_dev(dev);
	kobj = zv ? get_disk(zv->zv_disk) : NULL;
	mutex_exit(&zvol_state_lock);

	return kobj;
}

#ifdef HAVE_BDEV_BLOCK_DEVICE_OPERATIONS
static struct block_device_operations zvol_ops = {
	.open			= zvol_open,
	.release		= zvol_release,
	.ioctl			= zvol_ioctl,
	.compat_ioctl		= zvol_compat_ioctl,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};

#else /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

static int
zvol_open_by_inode(struct inode *inode, struct file *file)
{
	return zvol_open(inode->i_bdev, file->f_mode);
}

static int
zvol_release_by_inode(struct inode *inode, struct file *file)
{
	return zvol_release(inode->i_bdev->bd_disk, file->f_mode);
}

static int
zvol_ioctl_by_inode(struct inode *inode, struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL || inode == NULL)
		return -EINVAL;
	return zvol_ioctl(inode->i_bdev, file->f_mode, cmd, arg);
}

# ifdef CONFIG_COMPAT
static long
zvol_compat_ioctl_by_inode(struct file *file,
    unsigned int cmd, unsigned long arg)
{
	if (file == NULL)
		return -EINVAL;
	return zvol_compat_ioctl(file->f_dentry->d_inode->i_bdev,
	    file->f_mode, cmd, arg);
}
# else
# define	zvol_compat_ioctl_by_inode	NULL
# endif

static struct block_device_operations zvol_ops = {
	.open			= zvol_open_by_inode,
	.release		= zvol_release_by_inode,
	.ioctl			= zvol_ioctl_by_inode,
	.compat_ioctl		= zvol_compat_ioctl_by_inode,
	.media_changed		= zvol_media_changed,
	.revalidate_disk	= zvol_revalidate_disk,
	.getgeo			= zvol_getgeo,
	.owner			= THIS_MODULE,
};
#endif /* HAVE_BDEV_BLOCK_DEVICE_OPERATIONS */

/*
 * Allocate memory for a new zvol_state_t and setup the required
 * request queue and generic disk structures for the block device.
 */
static zvol_state_t *
zvol_alloc(dev_t dev, const char *name)
{
	zvol_state_t *zv;
	int error = 0;

	zv = kmem_zalloc(sizeof (zvol_state_t), KM_PUSHPAGE);

	spin_lock_init(&zv->zv_lock);
	list_link_init(&zv->zv_next);

	zv->zv_queue = blk_init_queue(zvol_request, &zv->zv_lock);
	if (zv->zv_queue == NULL)
		goto out_kmem;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(zv->zv_queue, "noop");
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		printk("ZFS: Unable to set \"%s\" scheduler for zvol %s: %d\n",
		    "noop", name, error);
		goto out_queue;
	}

#ifdef HAVE_BLK_QUEUE_FLUSH
	blk_queue_flush(zv->zv_queue, VDEV_REQ_FLUSH | VDEV_REQ_FUA);
#else
	blk_queue_ordered(zv->zv_queue, QUEUE_ORDERED_DRAIN, NULL);
#endif /* HAVE_BLK_QUEUE_FLUSH */

	zv->zv_disk = alloc_disk(ZVOL_MINORS);
	if (zv->zv_disk == NULL)
		goto out_queue;

	zv->zv_queue->queuedata = zv;
	zv->zv_dev = dev;
	zv->zv_open_count = 0;
	strlcpy(zv->zv_name, name, MAXNAMELEN);

	mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
	avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
	    sizeof (rl_t), offsetof(rl_t, r_node));
	zv->zv_znode.z_is_zvol = TRUE;

	zv->zv_disk->major = zvol_major;
	zv->zv_disk->first_minor = (dev & MINORMASK);
	zv->zv_disk->fops = &zvol_ops;
	zv->zv_disk->private_data = zv;
	zv->zv_disk->queue = zv->zv_queue;
	snprintf(zv->zv_disk->disk_name, DISK_NAME_LEN, "%s%d",
	    ZVOL_DEV_NAME, (dev & MINORMASK));

	return zv;

out_queue:
	blk_cleanup_queue(zv->zv_queue);
out_kmem:
	kmem_free(zv, sizeof (zvol_state_t));

	return NULL;
}

/*
 * Cleanup then free a zvol_state_t which was created by zvol_alloc().
 */
static void
zvol_free(zvol_state_t *zv)
{
	avl_destroy(&zv->zv_znode.z_range_avl);
	mutex_destroy(&zv->zv_znode.z_range_lock);

	del_gendisk(zv->zv_disk);
	blk_cleanup_queue(zv->zv_queue);
	put_disk(zv->zv_disk);

	kmem_free(zv, sizeof (zvol_state_t));
}

static int
__zvol_snapdev_hidden(const char *name)
{
	uint64_t snapdev;
	char *parent;
	char *atp;
	int error = 0;

	parent = kmem_alloc(MAXPATHLEN, KM_PUSHPAGE);
	(void) strlcpy(parent, name, MAXPATHLEN);

	if ((atp = strrchr(parent, '@')) != NULL) {
		*atp = '\0';
		error = dsl_prop_get_integer(parent, "snapdev", &snapdev, NULL);
		if ((error == 0) && (snapdev == ZFS_SNAPDEV_HIDDEN))
			error = SET_ERROR(ENODEV);
	}
	kmem_free(parent, MAXPATHLEN);
	return (error);
}

static int
__zvol_create_minor(const char *name, boolean_t ignore_snapdev)
{
	zvol_state_t *zv;
	objset_t *os;
	dmu_object_info_t *doi;
	uint64_t volsize;
	unsigned minor = 0;
	int error = 0;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv) {
		error = SET_ERROR(EEXIST);
		goto out;
	}

	if (ignore_snapdev == B_FALSE) {
		error = __zvol_snapdev_hidden(name);
		if (error)
			goto out;
	}

	doi = kmem_alloc(sizeof (dmu_object_info_t), KM_PUSHPAGE);

	error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, zvol_tag, &os);
	if (error)
		goto out_doi;

	error = dmu_object_info(os, ZVOL_OBJ, doi);
	if (error)
		goto out_dmu_objset_disown;

	error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
	if (error)
		goto out_dmu_objset_disown;

	error = zvol_find_minor(&minor);
	if (error)
		goto out_dmu_objset_disown;

	zv = zvol_alloc(MKDEV(zvol_major, minor), name);
	if (zv == NULL) {
		error = SET_ERROR(EAGAIN);
		goto out_dmu_objset_disown;
	}

	if (dmu_objset_is_snapshot(os))
		zv->zv_flags |= ZVOL_RDONLY;

	zv->zv_volblocksize = doi->doi_data_block_size;
	zv->zv_volsize = volsize;
	zv->zv_objset = os;

	set_capacity(zv->zv_disk, zv->zv_volsize >> 9);

	blk_queue_max_hw_sectors(zv->zv_queue, UINT_MAX);
	blk_queue_max_segments(zv->zv_queue, UINT16_MAX);
	blk_queue_max_segment_size(zv->zv_queue, UINT_MAX);
	blk_queue_physical_block_size(zv->zv_queue, zv->zv_volblocksize);
	blk_queue_io_opt(zv->zv_queue, zv->zv_volblocksize);
#ifdef HAVE_BLK_QUEUE_DISCARD
	blk_queue_max_discard_sectors(zv->zv_queue,
	    (zvol_max_discard_blocks * zv->zv_volblocksize) >> 9);
	blk_queue_discard_granularity(zv->zv_queue, zv->zv_volblocksize);
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zv->zv_queue);
#endif
#ifdef HAVE_BLK_QUEUE_NONROT
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zv->zv_queue);
#endif

	if (spa_writeable(dmu_objset_spa(os))) {
		if (zil_replay_disable)
			zil_destroy(dmu_objset_zil(os), B_FALSE);
		else
			zil_replay(os, zv, zvol_replay_vector);
	}

	zv->zv_objset = NULL;
out_dmu_objset_disown:
	dmu_objset_disown(os, zvol_tag);
out_doi:
	kmem_free(doi, sizeof (dmu_object_info_t));
out:

	if (error == 0) {
		zvol_insert(zv);
		add_disk(zv->zv_disk);
	}

	return (error);
}

/*
 * Create a block device minor node and setup the linkage between it
 * and the specified volume. Once this function returns the block
 * device is live and ready for use.
 */
int
zvol_create_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_create_minor(name, B_FALSE);
	mutex_exit(&zvol_state_lock);

	return (error);
}
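
/*
 * Example (assumed flow): "zfs create -V 1G tank/vol" reaches this
 * function through the ioctl path; the volume then appears as
 * /dev/zd<minor>, and udev is expected to add the /dev/<pool>/<dataset>
 * symlink described at the top of this file.
 */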

static int
__zvol_remove_minor(const char *name)
{
	zvol_state_t *zv;

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	zv = zvol_find_by_name(name);
	if (zv == NULL)
		return (SET_ERROR(ENXIO));

	if (zv->zv_open_count > 0)
		return (SET_ERROR(EBUSY));

	zvol_remove(zv);
	zvol_free(zv);

	return (0);
}

/*
 * Remove a block device minor node for the specified volume.
 */
int
zvol_remove_minor(const char *name)
{
	int error;

	mutex_enter(&zvol_state_lock);
	error = __zvol_remove_minor(name);
	mutex_exit(&zvol_state_lock);

	return (error);
}

/*
 * Rename a block device minor node for the specified volume.
 */
static void
__zvol_rename_minor(zvol_state_t *zv, const char *newname)
{
	int readonly = get_disk_ro(zv->zv_disk);

	ASSERT(MUTEX_HELD(&zvol_state_lock));

	strlcpy(zv->zv_name, newname, sizeof (zv->zv_name));

	/*
	 * The block device's read-only state is briefly changed causing
	 * a KOBJ_CHANGE uevent to be issued. This ensures udev detects
	 * the name change and fixes the symlinks. This does not change
	 * ZVOL_RDONLY in zv->zv_flags so the actual read-only state never
	 * changes. This would normally be done using kobject_uevent() but
	 * that is a GPL-only symbol which is why we need this workaround.
	 */
	set_disk_ro(zv->zv_disk, !readonly);
	set_disk_ro(zv->zv_disk, readonly);
}

static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
	(void) zvol_create_minor(dsname);

	return (0);
}

/*
 * Create minors for the specified dataset including children and snapshots.
 */
int
zvol_create_minors(const char *name)
{
	int error = 0;

	if (!zvol_inhibit_dev)
		error = dmu_objset_find((char *)name, zvol_create_minors_cb,
		    NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);

	return (SET_ERROR(error));
}

/*
 * Remove minors for the specified dataset including children and snapshots.
 */
void
zvol_remove_minors(const char *name)
{
	zvol_state_t *zv, *zv_next;
	int namelen = ((name) ? strlen(name) : 0);

	if (zvol_inhibit_dev)
		return;

	mutex_enter(&zvol_state_lock);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
		    (strncmp(zv->zv_name, name, namelen) == 0 &&
		    zv->zv_name[namelen] == '/')) {
			zvol_remove(zv);
			zvol_free(zv);
		}
	}

	mutex_exit(&zvol_state_lock);
}

/*
 * Rename minors for the specified dataset including children and snapshots.
 */
void
zvol_rename_minors(const char *oldname, const char *newname)
{
	zvol_state_t *zv, *zv_next;
	int oldnamelen, newnamelen;
	char *name;

	if (zvol_inhibit_dev)
		return;

	oldnamelen = strlen(oldname);
	newnamelen = strlen(newname);
	name = kmem_alloc(MAXNAMELEN, KM_PUSHPAGE);

	mutex_enter(&zvol_state_lock);

	for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
		zv_next = list_next(&zvol_state_list, zv);

		if (strcmp(zv->zv_name, oldname) == 0) {
			__zvol_rename_minor(zv, newname);
		} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
		    (zv->zv_name[oldnamelen] == '/' ||
		    zv->zv_name[oldnamelen] == '@')) {
			snprintf(name, MAXNAMELEN, "%s%c%s", newname,
			    zv->zv_name[oldnamelen],
			    zv->zv_name + oldnamelen + 1);
			__zvol_rename_minor(zv, name);
		}
	}

	mutex_exit(&zvol_state_lock);

	kmem_free(name, MAXNAMELEN);
}
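
/*
 * Worked example (illustrative only): renaming "tank/vol" to
 * "tank/newvol" also rewrites descendants, so "tank/vol/sub" becomes
 * "tank/newvol/sub" and "tank/vol@snap" becomes "tank/newvol@snap".
 */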

static int
snapdev_snapshot_changed_cb(const char *dsname, void *arg)
{
	uint64_t snapdev = *(uint64_t *)arg;

	if (strchr(dsname, '@') == NULL)
		return (0);

	switch (snapdev) {
	case ZFS_SNAPDEV_VISIBLE:
		mutex_enter(&zvol_state_lock);
		(void) __zvol_create_minor(dsname, B_TRUE);
		mutex_exit(&zvol_state_lock);
		break;
	case ZFS_SNAPDEV_HIDDEN:
		(void) zvol_remove_minor(dsname);
		break;
	}

	return (0);
}

int
zvol_set_snapdev(const char *dsname, uint64_t snapdev)
{
	(void) dmu_objset_find((char *)dsname, snapdev_snapshot_changed_cb,
	    &snapdev, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
	/* caller should continue to modify snapdev property */
	return (-1);
}
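
/*
 * Example (assumed flow): "zfs set snapdev=visible tank/vol" walks every
 * snapshot of tank/vol through the callback above, creating a minor for
 * each; setting snapdev=hidden removes those minors again.
 */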

int
zvol_init(void)
{
	int error;

	list_create(&zvol_state_list, sizeof (zvol_state_t),
	    offsetof(zvol_state_t, zv_next));
	mutex_init(&zvol_state_lock, NULL, MUTEX_DEFAULT, NULL);

	zvol_taskq = taskq_create(ZVOL_DRIVER, zvol_threads, maxclsyspri,
	    zvol_threads, INT_MAX, TASKQ_PREPOPULATE);
	if (zvol_taskq == NULL) {
		printk(KERN_INFO "ZFS: taskq_create() failed\n");
		error = -ENOMEM;
		goto out1;
	}

	error = register_blkdev(zvol_major, ZVOL_DRIVER);
	if (error) {
		printk(KERN_INFO "ZFS: register_blkdev() failed %d\n", error);
		goto out2;
	}

	blk_register_region(MKDEV(zvol_major, 0), 1UL << MINORBITS,
	    THIS_MODULE, zvol_probe, NULL, NULL);

	return (0);

out2:
	taskq_destroy(zvol_taskq);
out1:
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);

	return (error);
}

void
zvol_fini(void)
{
	zvol_remove_minors(NULL);
	blk_unregister_region(MKDEV(zvol_major, 0), 1UL << MINORBITS);
	unregister_blkdev(zvol_major, ZVOL_DRIVER);
	taskq_destroy(zvol_taskq);
	mutex_destroy(&zvol_state_lock);
	list_destroy(&zvol_state_list);
}

module_param(zvol_inhibit_dev, uint, 0644);
MODULE_PARM_DESC(zvol_inhibit_dev, "Do not create zvol device nodes");

module_param(zvol_major, uint, 0444);
MODULE_PARM_DESC(zvol_major, "Major number for zvol device");

module_param(zvol_threads, uint, 0444);
MODULE_PARM_DESC(zvol_threads, "Number of threads for zvol device");

module_param(zvol_max_discard_blocks, ulong, 0444);
MODULE_PARM_DESC(zvol_max_discard_blocks,
	"Max number of blocks to discard at once");
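
/*
 * Example (assumed usage): these parameters may be set at module load
 * time, e.g. "modprobe zfs zvol_inhibit_dev=1", and read back from
 * /sys/module/zfs/parameters/.
 */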