/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
typedef struct vdev_disk {
	struct block_device *vd_bdev;
	krwlock_t vd_lock;		/* protects vd_bdev */
} vdev_disk_t;
/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing.  The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static unsigned zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[0];	/* Attached bio's */
} dio_request_t;
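/*
 * Sizing note (added for clarity, not part of the upstream comment):
 * dr_bio[] is a flexible array member, so a request carrying N bio's is a
 * single allocation of sizeof (dio_request_t) + N * sizeof (struct bio *).
 * This is exactly how vdev_disk_dio_alloc() below computes the size, with
 * __vdev_disk_physio() starting from a default bio_count of 16.
 */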
static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	return (mode);
}
/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}
/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity.  Or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'.  The efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate.  Once the partition
 * size has been increased the additional capacity will be visible using
 * 'zpool online -e'.
 *
 * The returned maximum expansion capacity is always expected to be larger, or
 * at the very least equal, to its usable capacity to prevent overestimating
 * the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions.  Over reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout is keeping less than PARTITION_END_ALIGNMENT bytes
		 * after the "reserved" EFI partition: in such cases return
		 * the device usable capacity.
		 */
		available = i_size_read(bdev->bd_contains->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}
	/*
	 * Reopen the device if it is currently open.  When expanding a
	 * partition force re-scanning the partition table if userland
	 * did not take care of this already.  We need to do this while closed
	 * in order to get an accurate updated block device size.  Then
	 * since udev may need to recreate the device links increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vdev_disk_t *vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev->bd_contains) {
				bdevname(bdev->bd_contains, disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already.  We can detect this by
				 * comparing our current physical size
				 * with that of the device.  If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online.  We
				 * fallback to rescanning the partition
				 * table from the kernel below.  However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			blkdev_put(bdev, mode | FMODE_EXCL);
		}

		if (reread_part) {
			bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				blkdev_put(bdev, mode | FMODE_EXCL);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}
	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the systems tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events.  This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point.  Therefore,
	 * a ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up.  In
	 * practice delays have been observed to be on the order of 100ms.
	 */
	hrtime_t start = gethrtime();
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
		bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
		    zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdev = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdev = bdev;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}
	struct request_queue *q = bdev_get_queue(vd->vd_bdev);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(vd->vd_bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = !!blk_queue_discard(q);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = !!blk_queue_discard_secure(q);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(q);

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	return (0);
}
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		blkdev_put(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);

	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	submit_bio(bio);
#else
	submit_bio(0, bio);
#endif
}

/*
 * preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
	defined(CONFIG_PREEMPTION) && \
	defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only.  Define our
 * own version when needed which uses rcu_read_lock_sched().
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol thus inadvertently converting
 * the entire macro.  Provide a minimal version which always assigns the
 * request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_associate_blkg vdev_bio_associate_blkg
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;

	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;
	struct blk_plug plug;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    io_offset, io_size, i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return (SET_ERROR(ENOMEM));

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	(void) vdev_disk_dio_put(dr);

	return (error);
}
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = bio_alloc(GFP_NOIO, 0);
	/* bio_alloc() with __GFP_WAIT never returns NULL */
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}
static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	unsigned long trim_flags = 0;
	int rw, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state.  Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
		break;

	case ZIO_TYPE_READ:
		rw = READ;
		break;

	case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
		zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
		    trim_flags);

		rw_exit(&vd->vd_lock);
		zio_interrupt(zio);
		return;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, 0);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (zfs_check_media_change(vd->vd_bdev)) {
			invalidate_bdev(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};
/*
 * The zfs_vdev_scheduler module option has been deprecated.  Setting this
 * value no longer has any effect.  It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file.  The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}

int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}