/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 * Copyright (c) 2023, 2024, Klara Inc.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif
/*
 * Linux 6.8.x uses a bdev_handle as an instance/refcount for an underlying
 * block_device. Since it carries the block_device inside, it's convenient to
 * just use the handle as a proxy. For pre-6.8, we just emulate this with
 * a cast, since we don't need any of the other fields inside the handle.
 */
#ifdef HAVE_BDEV_OPEN_BY_PATH
typedef struct bdev_handle zfs_bdev_handle_t;
#define	BDH_BDEV(bdh)		((bdh)->bdev)
#define	BDH_IS_ERR(bdh)		(IS_ERR(bdh))
#define	BDH_PTR_ERR(bdh)	(PTR_ERR(bdh))
#define	BDH_ERR_PTR(err)	(ERR_PTR(err))
#else
typedef void zfs_bdev_handle_t;
#define	BDH_BDEV(bdh)		((struct block_device *)bdh)
#define	BDH_IS_ERR(bdh)		(IS_ERR(BDH_BDEV(bdh)))
#define	BDH_PTR_ERR(bdh)	(PTR_ERR(BDH_BDEV(bdh)))
#define	BDH_ERR_PTR(err)	(ERR_PTR(err))
#endif
typedef struct vdev_disk {
	zfs_bdev_handle_t *vd_bdh;
	krwlock_t vd_lock;
} vdev_disk_t;
/*
 * Maximum number of segments to add to a bio (min 4). If this is higher than
 * the maximum allowed by the device queue or the kernel itself, it will be
 * clamped. Setting it to zero will cause the kernel's ideal size to be used.
 */
uint_t zfs_vdev_disk_max_segs = 0;
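/*
 * Like other ZFS module parameters, this tunable is exposed through the
 * standard Linux module parameter interface; for example (a usage sketch,
 * assuming the usual OpenZFS packaging where the module is named "zfs"):
 *
 *	echo 16 > /sys/module/zfs/parameters/zfs_vdev_disk_max_segs
 */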
/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing. The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static uint_t zfs_vdev_open_timeout_ms = 1000;
/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)
/*
 * BIO request failfast mask.
 */
static unsigned int zfs_vdev_failfast_mask = 1;
#ifdef HAVE_BLK_MODE_T
static blk_mode_t
#else
static fmode_t
#endif
vdev_bdev_mode(spa_mode_t spa_mode, boolean_t exclusive)
{
#ifdef HAVE_BLK_MODE_T
	blk_mode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= BLK_OPEN_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= BLK_OPEN_WRITE;

	if (exclusive)
		mode |= BLK_OPEN_EXCL;
#else
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	if (exclusive)
		mode |= FMODE_EXCL;
#endif

	return (mode);
}
/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}
#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
	return (bdev->bd_contains);
}
#endif
#if defined(HAVE_BDEVNAME)
#define	vdev_bdevname(bdev, name)	bdevname(bdev, name)
#else
static inline void
vdev_bdevname(struct block_device *bdev, char *name)
{
	snprintf(name, BDEVNAME_SIZE, "%pg", bdev);
}
#endif
/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity. Or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'. The efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate. Once the partition
 * size has been increased the additional capacity will be visible using
 * bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger, or
 * at the very least equal, to its usable capacity to prevent overestimating
 * the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev != bdev_whole(bdev)) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions. Over reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout is keeping less than PARTITION_END_ALIGNMENT bytes
		 * after the "reserved" EFI partition: in such cases return
		 * the device usable capacity.
		 */
		available = i_size_read(bdev_whole(bdev)->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%llu\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    (u_longlong_t)zio->io_flags);
}
static void
vdev_disk_kobj_evt_post(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;
	if (vd && vd->vd_bdh) {
		spl_signal_kobj_evt(BDH_BDEV(vd->vd_bdh));
	} else {
		vdev_dbgmsg(v, "vdev_disk_t is NULL for VDEV:%s\n",
		    v->vdev_path);
	}
}
static zfs_bdev_handle_t *
vdev_blkdev_get_by_path(const char *path, spa_mode_t mode, void *holder)
{
#if defined(HAVE_BDEV_OPEN_BY_PATH)
	return (bdev_open_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder, NULL));
#elif defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
	return (blkdev_get_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder, NULL));
#else
	return (blkdev_get_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder));
#endif
}
static void
vdev_blkdev_put(zfs_bdev_handle_t *bdh, spa_mode_t mode, void *holder)
{
#if defined(HAVE_BDEV_RELEASE)
	return (bdev_release(bdh));
#elif defined(HAVE_BLKDEV_PUT_HOLDER)
	return (blkdev_put(BDH_BDEV(bdh), holder));
#else
	return (blkdev_put(BDH_BDEV(bdh),
	    vdev_bdev_mode(mode, B_TRUE)));
#endif
}
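/*
 * The three branches above track the kernel's open/release API as it evolved:
 * bdev_open_by_path()/bdev_release() around Linux 6.8, blkdev_put() taking a
 * holder argument around 6.5, and the older blkdev_put() taking the open mode
 * before that. The exact version boundaries are our reading of kernel
 * history; the configure-time HAVE_* checks are what actually select the
 * branch.
 */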
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	zfs_bdev_handle_t *bdh;
#ifdef HAVE_BLK_MODE_T
	blk_mode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#else
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#endif
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}
	/*
	 * Reopen the device if it is currently open. When expanding a
	 * partition force re-scanning the partition table if userland
	 * did not take care of this already. We need to do this while closed
	 * in order to get an accurate updated block device size. Then
	 * since udev may need to recreate the device links increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdh = vd->vd_bdh;
		vd->vd_bdh = NULL;

		if (bdh) {
			struct block_device *bdev = BDH_BDEV(bdh);
			if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
				vdev_bdevname(bdev_whole(bdev), disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already. We can detect this by
				 * comparing our current physical size
				 * with that of the device. If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online. We
				 * fallback to rescanning the partition
				 * table from the kernel below. However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
		}

		if (reread_part) {
			bdh = vdev_blkdev_get_by_path(disk_name, mode,
			    zfs_vdev_holder);
			if (!BDH_IS_ERR(bdh)) {
				int error =
				    vdev_bdev_reread_part(BDH_BDEV(bdh));
				vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}
	/*
	 * Devices are always opened by the path provided at configuration
	 * time. This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue. If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved. This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit. It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events. This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point. Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up. In
	 * practice delays have been observed to be on the order of 100ms.
	 *
	 * When ERESTARTSYS is returned it indicates the block device is
	 * a zvol which could not be opened due to the deadlock detection
	 * logic in zvol_open(). Extend the timeout and retry the open;
	 * subsequent attempts are expected to eventually succeed.
	 */
	hrtime_t start = gethrtime();
	bdh = BDH_ERR_PTR(-ENXIO);
	while (BDH_IS_ERR(bdh) && ((gethrtime() - start) < timeout)) {
		bdh = vdev_blkdev_get_by_path(v->vdev_path, mode,
		    zfs_vdev_holder);
		if (unlikely(BDH_PTR_ERR(bdh) == -ENOENT)) {
			/*
			 * There is no point in waiting since the device is
			 * removed permanently.
			 */
			if (v->vdev_removed)
				break;

			schedule_timeout(MSEC_TO_TICK(10));
		} else if (unlikely(BDH_PTR_ERR(bdh) == -ERESTARTSYS)) {
			timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
			continue;
		} else if (BDH_IS_ERR(bdh)) {
			/* Any other error is unexpected; stop retrying. */
			break;
		}
	}
	if (BDH_IS_ERR(bdh)) {
		int error = -BDH_PTR_ERR(bdh);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdh = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdh = bdh;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}
	struct block_device *bdev = BDH_BDEV(vd->vd_bdh);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = bdev_discard_supported(bdev);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = bdev_secure_discard_supported(bdev);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(bdev));

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	return (0);
}
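/*
 * Worked example for the ashift math above: a device reporting a 4096-byte
 * physical block size gives highbit64(4096) - 1 = 13 - 1 = 12, i.e. ashift
 * 12, while a 512-byte block size is first raised to SPA_MINBLOCKSIZE (512)
 * and yields ashift 9.
 */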
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdh != NULL) {
		vdev_blkdev_put(vd->vd_bdh, spa_mode(v->vdev_spa),
		    zfs_vdev_holder);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	(void) submit_bio(bio);
#else
	(void) submit_bio(bio_data_dir(bio), bio);
#endif
}
/*
 * preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
	defined(CONFIG_PREEMPTION) && \
	defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif
/*
 * As of the Linux 5.18 kernel, bio_alloc() expects a block_device struct
 * as an argument, removing the need to set it with bio_set_dev(). This
 * removes the need for all of the following compatibility code.
 */
#if !defined(HAVE_BIO_ALLOC_4ARG)
#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only. Define our
 * own version when needed which uses rcu_read_lock_sched().
 *
 * The Linux 5.17 kernel split linux/blk-cgroup.h into a private and a public
 * part, moving blkg_tryget into the private one. Define our own version.
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY) || !defined(HAVE_BLKG_TRYGET)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#else
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol thus inadvertently converting
 * the entire macro. Provide a minimal version which always assigns the
 * request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}

#define	bio_associate_blkg	vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}

#define	bio_set_dev		vdev_bio_set_dev
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
#endif /* !HAVE_BIO_ALLOC_4ARG */
static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}
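/*
 * A note on the bio_list save/restore above: if the caller is already inside
 * a block layer submission (current->bio_list is non-NULL), a nested
 * submit_bio() would normally just queue the bio on that list instead of
 * sending it to the device. Clearing the pointer around the call forces
 * immediate submission. This explanation is our reading of the kernel's
 * make_request recursion handling, not text from the original source.
 */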
static inline struct bio *
vdev_bio_alloc(struct block_device *bdev, gfp_t gfp_mask,
    unsigned short nr_vecs)
{
	struct bio *bio;

#ifdef HAVE_BIO_ALLOC_4ARG
	bio = bio_alloc(bdev, nr_vecs, 0, gfp_mask);
#else
	bio = bio_alloc(gfp_mask, nr_vecs);
	if (likely(bio != NULL))
		bio_set_dev(bio, bdev);
#endif

	return (bio);
}
static inline uint_t
vdev_bio_max_segs(struct block_device *bdev)
{
	/*
	 * Smallest of the device max segs and the tuneable max segs. Minimum
	 * 4, so there's room to finish split pages if they come up.
	 */
	const uint_t dev_max_segs = queue_max_segments(bdev_get_queue(bdev));
	const uint_t tune_max_segs = (zfs_vdev_disk_max_segs > 0) ?
	    MAX(4, zfs_vdev_disk_max_segs) : dev_max_segs;
	const uint_t max_segs = MIN(tune_max_segs, dev_max_segs);

#ifdef HAVE_BIO_MAX_SEGS
	return (bio_max_segs(max_segs));
#else
	return (MIN(max_segs, BIO_MAX_PAGES));
#endif
}
static inline uint_t
vdev_bio_max_bytes(struct block_device *bdev)
{
	return (queue_max_sectors(bdev_get_queue(bdev)) << 9);
}
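/*
 * queue_max_sectors() reports a limit in 512-byte sectors, so the shift by
 * 9 above converts it to bytes; e.g. a queue limit of 2560 sectors allows
 * 2560 << 9 = 1310720 bytes (1.25 MiB) per BIO.
 */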
/*
 * Virtual block IO object (VBIO)
 *
 * Linux block IO (BIO) objects have a limit on how many data segments (pages)
 * they can hold. Depending on how they're allocated and structured, a large
 * ZIO can require more than one BIO to be submitted to the kernel, which then
 * all have to complete before we can return the completed ZIO back to ZFS.
 *
 * A VBIO is a wrapper around multiple BIOs, carrying everything needed to
 * translate a ZIO down into the kernel block layer and back again.
 *
 * Note that these are only used for data ZIOs (read/write). Meta-operations
 * (flush/trim) don't need multiple BIOs and so can just make the call
 * directly.
 */
typedef struct {
	zio_t		*vbio_zio;	/* parent zio */

	struct block_device *vbio_bdev; /* blockdev to submit bios to */

	abd_t		*vbio_abd;	/* abd carrying borrowed linear buf */

	atomic_t	vbio_ref;	/* bio refcount */
	int		vbio_error;	/* error from failed bio */

	uint_t		vbio_max_segs;	/* max segs per bio */

	uint_t		vbio_max_bytes;	/* max bytes per bio */
	uint_t		vbio_lbs_mask;	/* logical block size mask */

	uint64_t	vbio_offset;	/* start offset of next bio */

	struct bio	*vbio_bio;	/* pointer to the current bio */
	struct bio	*vbio_bios;	/* list of all bios */
} vbio_t;
static vbio_t *
vbio_alloc(zio_t *zio, struct block_device *bdev)
{
	vbio_t *vbio = kmem_zalloc(sizeof (vbio_t), KM_SLEEP);

	vbio->vbio_zio = zio;
	vbio->vbio_bdev = bdev;
	atomic_set(&vbio->vbio_ref, 0);
	vbio->vbio_max_segs = vdev_bio_max_segs(bdev);
	vbio->vbio_max_bytes = vdev_bio_max_bytes(bdev);
	vbio->vbio_lbs_mask = ~(bdev_logical_block_size(bdev)-1);
	vbio->vbio_offset = zio->io_offset;

	return (vbio);
}
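/*
 * vbio_lbs_mask relies on the logical block size being a power of two, which
 * the block layer guarantees. For a 4096-byte LBS the mask is ~(4096-1) =
 * 0xFFFFF000 (in 32 bits), so "x & vbio_lbs_mask" rounds a byte count down
 * to a whole number of logical blocks.
 */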
static int
vbio_add_page(vbio_t *vbio, struct page *page, uint_t size, uint_t offset)
{
	struct bio *bio;
	uint_t ssize;

	while (size > 0) {
		bio = vbio->vbio_bio;
		if (bio == NULL) {
			/* New BIO, allocate and set up */
			bio = vdev_bio_alloc(vbio->vbio_bdev, GFP_NOIO,
			    vbio->vbio_max_segs);
			if (unlikely(bio == NULL))
				return (SET_ERROR(ENOMEM));
			BIO_BI_SECTOR(bio) = vbio->vbio_offset >> 9;

			bio->bi_next = vbio->vbio_bios;
			vbio->vbio_bios = vbio->vbio_bio = bio;
		}

		/*
		 * Only load as much of the current page data as will fit in
		 * the space left in the BIO, respecting lbs alignment. Older
		 * kernels will error if we try to overfill the BIO, while
		 * newer ones will accept it and split the BIO. This ensures
		 * everything works on older kernels, and avoids an additional
		 * overhead on the new.
		 */
		ssize = MIN(size, (vbio->vbio_max_bytes - BIO_BI_SIZE(bio)) &
		    vbio->vbio_lbs_mask);
		if (ssize > 0 &&
		    bio_add_page(bio, page, ssize, offset) == ssize) {
			/* Accepted, adjust and load any remaining. */
			size -= ssize;
			offset += ssize;
			continue;
		}

		/* No room, set up for a new BIO and loop */
		vbio->vbio_offset += BIO_BI_SIZE(bio);

		/* Signal new BIO allocation wanted */
		vbio->vbio_bio = NULL;
	}

	return (0);
}
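/*
 * Worked example of the sizing logic above (illustrative numbers, not from
 * the original source): with vbio_max_bytes = 128 KiB and a 4 KiB logical
 * block size, a BIO already holding 126 KiB has 2 KiB of space left; masked
 * down by vbio_lbs_mask that becomes 0, so ssize is 0, the current BIO is
 * closed out and the page is retried on a fresh BIO at the advanced offset.
 */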
BIO_END_IO_PROTO(vdev_disk_io_rw_completion, bio, error);
static void vbio_put(vbio_t *vbio);
static void
vbio_submit(vbio_t *vbio, int flags)
{
	ASSERT(vbio->vbio_bios);
	struct bio *bio = vbio->vbio_bios;
	vbio->vbio_bio = vbio->vbio_bios = NULL;

	/*
	 * We take a reference for each BIO as we submit it, plus one to
	 * protect us from BIOs completing before we're done submitting them
	 * all, causing vbio_put() to free vbio out from under us and/or the
	 * zio to be returned before all its IO has completed.
	 */
	atomic_set(&vbio->vbio_ref, 1);

	/*
	 * If we're submitting more than one BIO, inform the block layer so
	 * it can batch them if it wants.
	 */
	struct blk_plug plug;
	boolean_t do_plug = (bio->bi_next != NULL);
	if (do_plug)
		blk_start_plug(&plug);

	/* Submit all the BIOs */
	while (bio != NULL) {
		atomic_inc(&vbio->vbio_ref);

		struct bio *next = bio->bi_next;
		bio->bi_next = NULL;

		bio->bi_end_io = vdev_disk_io_rw_completion;
		bio->bi_private = vbio;
		bio_set_op_attrs(bio,
		    vbio->vbio_zio->io_type == ZIO_TYPE_WRITE ?
		    WRITE : READ, flags);

		vdev_submit_bio(bio);

		bio = next;
	}

	/* Finish the batch */
	if (do_plug)
		blk_finish_plug(&plug);

	/* Release the extra reference */
	vbio_put(vbio);
}
static void
vbio_return_abd(vbio_t *vbio)
{
	zio_t *zio = vbio->vbio_zio;
	if (vbio->vbio_abd == NULL)
		return;

	/*
	 * If we copied the ABD before issuing it, clean up and return the copy
	 * to the ABD, with changes if appropriate.
	 */
	void *buf = abd_to_buf(vbio->vbio_abd);
	abd_free(vbio->vbio_abd);
	vbio->vbio_abd = NULL;

	if (zio->io_type == ZIO_TYPE_READ)
		abd_return_buf_copy(zio->io_abd, buf, zio->io_size);
	else
		abd_return_buf(zio->io_abd, buf, zio->io_size);
}
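/*
 * The split above mirrors the borrow in vdev_disk_io_rw(): a read filled the
 * linear buffer with device data, so abd_return_buf_copy() copies it back
 * into the original ABD, while a write already copied the data out on the
 * way in and abd_return_buf() can simply discard the buffer.
 */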
static void
vbio_free(vbio_t *vbio)
{
	VERIFY0(atomic_read(&vbio->vbio_ref));

	vbio_return_abd(vbio);

	kmem_free(vbio, sizeof (vbio_t));
}
static void
vbio_put(vbio_t *vbio)
{
	if (atomic_dec_return(&vbio->vbio_ref) > 0)
		return;

	/*
	 * This was the last reference, so the entire IO is completed. Clean
	 * up and submit it for processing.
	 */

	/*
	 * Get any data buf back to the original ABD, if necessary. We do this
	 * now so we can get the ZIO into the pipeline as quickly as possible,
	 * and then do the remaining cleanup after.
	 */
	vbio_return_abd(vbio);

	zio_t *zio = vbio->vbio_zio;

	/*
	 * Set the overall error. If multiple BIOs returned an error, only the
	 * first will be taken; the others are dropped (see
	 * vdev_disk_io_rw_completion()). It's pretty much impossible for
	 * multiple IOs to the same device to fail with different errors, so
	 * there's no real risk.
	 */
	zio->io_error = vbio->vbio_error;
	if (zio->io_error)
		vdev_disk_error(zio);

	/* All done, submit for processing */
	zio_delay_interrupt(zio);

	/* Finish cleaning up the vbio */
	vbio_free(vbio);
}
BIO_END_IO_PROTO(vdev_disk_io_rw_completion, bio, error)
{
	vbio_t *vbio = bio->bi_private;

	if (vbio->vbio_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		vbio->vbio_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			vbio->vbio_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			vbio->vbio_error = EIO;
#endif
	}

	/*
	 * Destroy the BIO. This is safe to do; the vbio owns its data and the
	 * kernel won't touch it again after the completion function runs.
	 */
	bio_put(bio);

	/* Drop this BIO's reference acquired by vbio_submit() */
	vbio_put(vbio);
}
/*
 * Iterator callback to count ABD pages and check their size & alignment.
 *
 * On Linux, each BIO segment can take a page pointer, and an offset+length of
 * the data within that page. A page can be arbitrarily large ("compound"
 * pages) but we still have to ensure the data portion is correctly sized and
 * aligned to the logical block size, to ensure that if the kernel wants to
 * split the BIO, the two halves will still be properly aligned.
 */
typedef struct {
	uint_t	bmask;
	uint_t	npages;
	uint_t	end;
} vdev_disk_check_pages_t;
static int
vdev_disk_check_pages_cb(struct page *page, size_t off, size_t len, void *priv)
{
	(void) page;
	vdev_disk_check_pages_t *s = priv;

	/*
	 * If we didn't finish on a block size boundary last time, then there
	 * would be a gap if we tried to use this ABD as-is, so abort.
	 */
	if (s->end != 0)
		return (1);

	/*
	 * Note if we're taking less than a full block, so we can check it
	 * above on the next call.
	 */
	s->end = len & s->bmask;

	/* All blocks after the first must start on a block size boundary. */
	if (s->npages != 0 && (off & s->bmask) != 0)
		return (1);

	s->npages++;
	return (0);
}
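/*
 * Example of the check above (illustrative, not from the original source):
 * on a device with a 4096-byte logical block size (bmask = 0xFFF), an ABD
 * made of a full 4 KiB page followed by a 2 KiB tail passes: the first call
 * leaves end = 0, the second sets end = 2048. Any page after that tail, or
 * a later page starting at a non-multiple of 4 KiB, makes the callback
 * return 1 and the ABD is copied to a linear buffer instead.
 */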
/*
 * Check if we can submit the pages in this ABD to the kernel as-is.
 * Returns true if they can be submitted, false if they need to be copied
 * into an aligned linear buffer first.
 */
static boolean_t
vdev_disk_check_pages(abd_t *abd, uint64_t size, struct block_device *bdev)
{
	vdev_disk_check_pages_t s = {
		.bmask = bdev_logical_block_size(bdev)-1,
		.npages = 0,
		.end = 0,
	};

	if (abd_iterate_page_func(abd, 0, size, vdev_disk_check_pages_cb, &s))
		return (B_FALSE);

	return (B_TRUE);
}
/* Iterator callback to submit ABD pages to the vbio. */
static int
vdev_disk_fill_vbio_cb(struct page *page, size_t off, size_t len, void *priv)
{
	vbio_t *vbio = priv;
	return (vbio_add_page(vbio, page, len, off));
}
static int
vdev_disk_io_rw(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = BDH_BDEV(vd->vd_bdh);
	int flags = 0;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (zio->io_offset + zio->io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    (u_longlong_t)zio->io_offset,
		    (u_longlong_t)zio->io_size,
		    (u_longlong_t)i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

	if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) &&
	    v->vdev_failfast == B_TRUE) {
		bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1,
		    zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4);
	}

	/*
	 * Check alignment of the incoming ABD. If any part of it would require
	 * submitting a page that is not aligned to the logical block size,
	 * then we take a copy into a linear buffer and submit that instead.
	 * This should be impossible on a 512b LBS, and fairly rare on 4K,
	 * usually requiring abnormally-small data blocks (eg gang blocks)
	 * mixed into the same ABD as larger ones (eg aggregated).
	 */
	abd_t *abd = zio->io_abd;
	if (!vdev_disk_check_pages(abd, zio->io_size, bdev)) {
		void *buf;
		if (zio->io_type == ZIO_TYPE_READ)
			buf = abd_borrow_buf(zio->io_abd, zio->io_size);
		else
			buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);

		/*
		 * Wrap the copy in an abd_t, so we can use the same iterators
		 * to count and fill the vbio later.
		 */
		abd = abd_get_from_buf(buf, zio->io_size);

		/*
		 * False here would mean the borrowed copy has an invalid
		 * alignment too, which would mean we've somehow been passed a
		 * linear ABD with an interior page that has a non-zero offset
		 * or a size not a multiple of PAGE_SIZE. This is not possible.
		 * It would mean either zio_buf_alloc() or its underlying
		 * allocators have done something extremely strange, or our
		 * math in vdev_disk_check_pages() is wrong. In either case,
		 * something is seriously wrong and it's not safe to continue.
		 */
		VERIFY(vdev_disk_check_pages(abd, zio->io_size, bdev));
	}

	/* Allocate vbio, with a pointer to the borrowed ABD if necessary */
	int error = 0;
	vbio_t *vbio = vbio_alloc(zio, bdev);
	if (abd != zio->io_abd)
		vbio->vbio_abd = abd;

	/* Fill it with pages */
	error = abd_iterate_page_func(abd, 0, zio->io_size,
	    vdev_disk_fill_vbio_cb, vbio);
	if (error != 0) {
		vbio_free(vbio);
		return (error);
	}

	vbio_submit(vbio, flags);
	return (0);
}
/*
 * This is the classic, battle-tested BIO submission code. Until we're totally
 * sure that the new code is safe and correct in all cases, this will remain
 * available and can be enabled by setting zfs_vdev_disk_classic=1 at module
 * load time.
 *
 * These functions have been renamed to vdev_classic_* to make it clear what
 * they belong to, but their implementations are unchanged.
 */

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[];	/* Attached bio's */
} dio_request_t;
static dio_request_t *
vdev_classic_dio_alloc(int bio_count)
{
	dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (int i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}
static void
vdev_classic_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_classic_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}
static void
vdev_classic_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_classic_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}
}
BIO_END_IO_PROTO(vdev_classic_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by vdev_classic_physio */
	vdev_classic_dio_put(dr);
}
static inline unsigned int
vdev_classic_bio_max_segs(zio_t *zio, int bio_size, uint64_t abd_offset)
{
	unsigned long nr_segs = abd_nr_pages_off(zio->io_abd,
	    bio_size, abd_offset);

#ifdef HAVE_BIO_MAX_SEGS
	return (bio_max_segs(nr_segs));
#else
	return (MIN(nr_segs, BIO_MAX_PAGES));
#endif
}
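/*
 * Compatibility note (our understanding, not from the original source): the
 * bio_max_segs() helper replaced the BIO_MAX_PAGES constant around Linux
 * 5.12, which is why both vdev_bio_max_segs() above and this function carry
 * the same HAVE_BIO_MAX_SEGS split.
 */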
static int
vdev_classic_physio(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = BDH_BDEV(vd->vd_bdh);
	size_t io_size = zio->io_size;
	uint64_t io_offset = zio->io_offset;
	int rw = zio->io_type == ZIO_TYPE_READ ? READ : WRITE;
	int flags = 0;

	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size;
	int bio_count = 16;
	int error = 0;
	struct blk_plug plug;
	unsigned short nr_vecs;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    (u_longlong_t)io_offset,
		    (u_longlong_t)io_size,
		    (u_longlong_t)i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_classic_dio_alloc(bio_count);

	if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) &&
	    zio->io_vd->vdev_failfast == B_TRUE) {
		bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1,
		    zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4);
	}

	dr->dr_zio = zio;

	/*
	 * Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
	 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
	 * can cover at least 128KB and at most 1MB. When the required number
	 * of iovec's exceeds this, we are forced to break the IO in multiple
	 * bio's and wait for them all to complete. This is likely if the
	 * recordsize property is increased beyond 1MB. The default
	 * bio_count=16 should typically accommodate the maximum-size zio of
	 * 16MB.
	 */

	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	/* The "<=" is intentional: hitting the equal case triggers a retry. */
	for (int i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * If additional bio's are required, we have to retry, but
		 * this should be rare - see the comment above.
		 */
		if (dr->dr_bio_count == i) {
			vdev_classic_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		nr_vecs = vdev_classic_bio_max_segs(zio, bio_size, abd_offset);
		dr->dr_bio[i] = vdev_bio_alloc(bdev, GFP_NOIO, nr_vecs);
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_classic_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_classic_physio_completion */
		vdev_classic_dio_get(dr);

		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_classic_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_classic_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (int i = 0; i < dr->dr_bio_count; i++) {
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);
	}

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	vdev_classic_dio_put(dr);

	return (error);
}
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = vdev_bio_alloc(bdev, GFP_NOIO, 0);
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}
#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
	defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
BIO_END_IO_PROTO(vdev_disk_discard_end_io, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	bio_put(bio);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
static int
vdev_issue_discard_trim(zio_t *zio, unsigned long flags)
{
	int ret;
	struct bio *bio = NULL;

#if defined(BLKDEV_DISCARD_SECURE)
	ret = - __blkdev_issue_discard(
	    BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
	    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, flags, &bio);
#else
	(void) flags;
	ret = - __blkdev_issue_discard(
	    BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
	    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, &bio);
#endif
	if (!ret && bio) {
		bio->bi_private = zio;
		bio->bi_end_io = vdev_disk_discard_end_io;
		vdev_submit_bio(bio);
	}
	return (ret);
}
#endif
*zio
)
1358 unsigned long trim_flags
= 0;
1359 if (zio
->io_trim_flags
& ZIO_TRIM_SECURE
) {
1360 #if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
1361 return (-blkdev_issue_secure_erase(
1362 BDH_BDEV(((vdev_disk_t
*)zio
->io_vd
->vdev_tsd
)->vd_bdh
),
1363 zio
->io_offset
>> 9, zio
->io_size
>> 9, GFP_NOFS
));
1364 #elif defined(BLKDEV_DISCARD_SECURE)
1365 trim_flags
|= BLKDEV_DISCARD_SECURE
;
1368 #if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
1369 defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
1370 return (vdev_issue_discard_trim(zio
, trim_flags
));
1371 #elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
1372 return (-blkdev_issue_discard(
1373 BDH_BDEV(((vdev_disk_t
*)zio
->io_vd
->vdev_tsd
)->vd_bdh
),
1374 zio
->io_offset
>> 9, zio
->io_size
>> 9, GFP_NOFS
, trim_flags
));
1376 #error "Unsupported kernel"
/*
 * BIO submission function to use; set on first use in vdev_disk_init().
 */
int (*vdev_disk_io_rw_fn)(zio_t *zio) = NULL;
static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state. Nothing to be done here but return failure.
	 */
	if (vd->vd_bdh == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(BDH_BDEV(vd->vd_bdh), zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_TRIM:
		zio->io_error = vdev_disk_io_trim(zio);
		rw_exit(&vd->vd_lock);
#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			zio_interrupt(zio);
#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
		zio_interrupt(zio);
#endif
		return;

	case ZIO_TYPE_READ:
	case ZIO_TYPE_WRITE:
		zio->io_target_timestamp = zio_handle_io_delay(zio);
		error = vdev_disk_io_rw_fn(zio);
		rw_exit(&vd->vd_lock);
		if (error) {
			zio->io_error = error;
			zio_interrupt(zio);
		}
		return;

	default:
		/*
		 * Getting here means our parent vdev has made a very strange
		 * request of us, and shouldn't happen. Assert here to force a
		 * crash in dev builds, but in production return the IO
		 * unhandled. The pool will likely suspend anyway but that's
		 * nicer than crashing the kernel.
		 */
		ASSERT3S(zio->io_type, ==, -1);

		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	__builtin_unreachable();
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media. If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (!zfs_check_disk_status(BDH_BDEV(vd->vd_bdh))) {
			invalidate_bdev(BDH_BDEV(vd->vd_bdh));
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
/*
 * At first vdev use, set the submission function to the default value if
 * it hasn't been set already.
 */
static int
vdev_disk_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	(void) spa;
	(void) nv;
	(void) tsd;

	if (vdev_disk_io_rw_fn == NULL)
		/* XXX make configurable */
		vdev_disk_io_rw_fn = 0 ? vdev_classic_physio : vdev_disk_io_rw;

	return (0);
}
vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = vdev_disk_init,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE,			/* leaf vdev */
	.vdev_op_kobj_evt_post = vdev_disk_kobj_evt_post
};
/*
 * The zfs_vdev_scheduler module option has been deprecated. Setting this
 * value no longer has any effect. It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file. The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

static const char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint_t val;
	int error;

	error = kstrtouint(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_uint(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint_t val;
	int error;

	error = kstrtouint(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_uint(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, open_timeout_ms, UINT, ZMOD_RW,
	"Timeout before determining that a device is missing");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, failfast_mask, UINT, ZMOD_RW,
	"Defines failfast mask: 1 - device, 2 - transport, 4 - driver");

ZFS_MODULE_PARAM(zfs_vdev_disk, zfs_vdev_disk_, max_segs, UINT, ZMOD_RW,
	"Maximum number of data segments to add to an IO request (min 4)");