/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>

typedef struct vdev_disk {
	struct block_device *vd_bdev;
	krwlock_t vd_lock;
} vdev_disk_t;

/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing.  The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static unsigned zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[0];	/* Attached bio's */
} dio_request_t;
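
/*
 * Layout note (added commentary, not upstream text): dr_bio[0] is a
 * flexible array member, so vdev_disk_dio_alloc() below sizes each
 * allocation as sizeof (dio_request_t) + sizeof (struct bio *) * bio_count,
 * leaving the attached bio pointers inline at the tail of the request.
 */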

static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	return (mode);
}

/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}

#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
	return (bdev->bd_contains);
}
#endif

/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity.  Or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'.  The efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate.  Once the partition
 * size has been increased the additional capacity will be visible using
 * bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger
 * than, or at the very least equal to, the usable capacity to prevent
 * overestimating the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev != bdev_whole(bdev)) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions.  Over reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 *
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout is keeping less than PARTITION_END_ALIGNMENT bytes
		 * after the "reserved" EFI partition: in such cases return
		 * the device usable capacity.
		 */
		available = i_size_read(bdev_whole(bdev)->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}

static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open.  When expanding a
	 * partition force re-scanning the partition table if userland
	 * did not take care of this already.  We need to do this while closed
	 * in order to get an accurate updated block device size.  Then
	 * since udev may need to recreate the device links increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vdev_disk_t *vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
				bdevname(bdev_whole(bdev), disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already.  We can detect this by
				 * comparing our current physical size
				 * with that of the device.  If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online.  We
				 * fall back to rescanning the partition
				 * table from the kernel below.  However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			blkdev_put(bdev, mode | FMODE_EXCL);
		}

		if (reread_part) {
			bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				blkdev_put(bdev, mode | FMODE_EXCL);
				if (error == 0)
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events.  This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point.  Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up.  In
	 * practice delays have been observed to be on the order of 100ms.
	 */
	hrtime_t start = gethrtime();
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
		bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
		    zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		v->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;
	rw_exit(&vd->vd_lock);

	struct request_queue *q = bdev_get_queue(vd->vd_bdev);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(vd->vd_bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = !!blk_queue_discard(q);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = !!blk_queue_discard_secure(q);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(q);

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	return (0);
}
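
/*
 * A worked instance of the ashift math above (added commentary): a device
 * reporting a 4096-byte physical and 512-byte logical block size yields
 * *physical_ashift = highbit64(MAX(4096, 512)) - 1 = 13 - 1 = 12 and
 * *logical_ashift = highbit64(MAX(512, 512)) - 1 = 9.  SPA_MINBLOCKSIZE
 * (512) floors both values at 9 for devices reporting smaller sectors.
 */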

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		blkdev_put(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (int i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);
}

static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	submit_bio(bio);
#else
	submit_bio(0, bio);
#endif
}

/*
 * preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
    defined(CONFIG_PREEMPTION) && \
    defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif

#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only.  Define our
 * own version when needed which uses rcu_read_lock_sched().
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol thus inadvertently converting
 * the entire macro.  Provide a minimal version which always assigns the
 * request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_associate_blkg	vdev_bio_associate_blkg
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */

static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;

	/*
	 * Temporarily detach the task's recursion-avoidance bio list so the
	 * bio is processed as a fresh top-level submission rather than being
	 * queued behind an in-flight parent bio; restore it afterwards.
	 */
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size;
	int bio_count = 16;
	int error = 0;
	struct blk_plug plug;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    io_offset, io_size, i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
	 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
	 * can cover at least 128KB and at most 1MB.  When the required number
	 * of iovec's exceeds this, we are forced to break the IO in multiple
	 * bio's and wait for them all to complete.  This is likely if the
	 * recordsize property is increased beyond 1MB.  The default
	 * bio_count=16 should typically accommodate the maximum-size zio of
	 * 16M.
	 */
	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (int i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * If additional bio's are required, we have to retry, but
		 * this should be rare - see the comment above.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (int i = 0; i < dr->dr_bio_count; i++) {
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);
	}

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	(void) vdev_disk_dio_put(dr);

	return (error);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = bio_alloc(GFP_NOIO, 0);
	/* bio_alloc() with __GFP_WAIT never returns NULL */
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}

static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	unsigned long trim_flags = 0;
	int rw, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state.  Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
		break;

	case ZIO_TYPE_READ:
		rw = READ;
		break;

	case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
		zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
		    trim_flags);

		rw_exit(&vd->vd_lock);
		zio_interrupt(zio);
		return;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, 0);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (zfs_check_media_change(vd->vd_bdev)) {
			invalidate_bdev(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};

/*
 * The zfs_vdev_scheduler module option has been deprecated.  Setting this
 * value no longer has any effect.  It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file.  The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");

static int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}

static int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}