/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif
/*
 * Linux 6.8.x uses a bdev_handle as an instance/refcount for an underlying
 * block_device. Since it carries the block_device inside, it's convenient to
 * just use the handle as a proxy. For pre-6.8, we just emulate this with
 * a cast, since we don't need any of the other fields inside the handle.
 */
#ifdef HAVE_BDEV_OPEN_BY_PATH
typedef struct bdev_handle zfs_bdev_handle_t;
#define	BDH_BDEV(bdh)		((bdh)->bdev)
#define	BDH_IS_ERR(bdh)		(IS_ERR(bdh))
#define	BDH_PTR_ERR(bdh)	(PTR_ERR(bdh))
#define	BDH_ERR_PTR(err)	(ERR_PTR(err))
#else
typedef void zfs_bdev_handle_t;
#define	BDH_BDEV(bdh)		((struct block_device *)bdh)
#define	BDH_IS_ERR(bdh)		(IS_ERR(BDH_BDEV(bdh)))
#define	BDH_PTR_ERR(bdh)	(PTR_ERR(BDH_BDEV(bdh)))
#define	BDH_ERR_PTR(err)	(ERR_PTR(err))
#endif
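
/*
 * Illustrative only (not part of the driver): the BDH_* macros above let a
 * single open/inspect/close sequence compile against both the 6.8+
 * bdev_open_by_path() API and the older blkdev_get_by_path() API, as the
 * vdev_blkdev_get_by_path()/vdev_blkdev_put() helpers below do:
 *
 *	zfs_bdev_handle_t *bdh = vdev_blkdev_get_by_path(path, mode, holder);
 *	if (BDH_IS_ERR(bdh))
 *		return (SET_ERROR(-BDH_PTR_ERR(bdh)));
 *	struct block_device *bdev = BDH_BDEV(bdh);
 *	...
 *	vdev_blkdev_put(bdh, mode, holder);
 */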
typedef struct vdev_disk {
	zfs_bdev_handle_t *vd_bdh;
	krwlock_t vd_lock;
} vdev_disk_t;

/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing. The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static uint_t zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)

/*
 * BIO request failfast mask.
 */
static unsigned int zfs_vdev_failfast_mask = 1;
#ifdef HAVE_BLK_MODE_T
static blk_mode_t
#else
static fmode_t
#endif
vdev_bdev_mode(spa_mode_t spa_mode, boolean_t exclusive)
{
#ifdef HAVE_BLK_MODE_T
	blk_mode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= BLK_OPEN_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= BLK_OPEN_WRITE;

	if (exclusive)
		mode |= BLK_OPEN_EXCL;
#else
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	if (exclusive)
		mode |= FMODE_EXCL;
#endif

	return (mode);
}
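
/*
 * For example (illustrative): opening a pool read-write with an exclusive
 * hold on a HAVE_BLK_MODE_T kernel,
 *
 *	vdev_bdev_mode(SPA_MODE_READ | SPA_MODE_WRITE, B_TRUE)
 *
 * returns BLK_OPEN_READ | BLK_OPEN_WRITE | BLK_OPEN_EXCL; on older kernels
 * the same call returns FMODE_READ | FMODE_WRITE | FMODE_EXCL.
 */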
/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}
#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
	return (bdev->bd_contains);
}
#endif
#if defined(HAVE_BDEVNAME)
#define	vdev_bdevname(bdev, name)	bdevname(bdev, name)
#else
static inline void
vdev_bdevname(struct block_device *bdev, char *name)
{
	snprintf(name, BDEVNAME_SIZE, "%pg", bdev);
}
#endif
/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity. Or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'. The efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate. Once the partition
 * size has been increased the additional capacity will be visible using
 * bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger, or
 * at the very least equal, to its usable capacity to prevent overestimating
 * the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev != bdev_whole(bdev)) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions.  Over reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition layout
		 * is keeping less than PARTITION_END_ALIGNMENT bytes after the
		 * "reserved" EFI partition: in such cases return the device
		 * usable capacity.
		 */
		available = i_size_read(bdev_whole(bdev)->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
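
/*
 * Worked example (illustrative; assumes the usual NEW_START_BLOCK and
 * PARTITION_END_ALIGNMENT values from efi_partition.h and 512-byte
 * sectors): with EFI_MIN_RESV_SIZE = 16384 blocks the deduction is
 *
 *	(16384 + NEW_START_BLOCK + PARTITION_END_ALIGNMENT) << SECTOR_BITS
 *
 * which is on the order of 9 MiB, so the reported expansion capacity of a
 * wholedisk vdev is slightly below the raw device size.
 */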
static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%llu\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    (u_longlong_t)zio->io_flags);
}
static void
vdev_disk_kobj_evt_post(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;
	if (vd && vd->vd_bdh) {
		spl_signal_kobj_evt(BDH_BDEV(vd->vd_bdh));
	} else {
		vdev_dbgmsg(v, "vdev_disk_t is NULL for VDEV:%s\n",
		    v->vdev_path);
	}
}
static zfs_bdev_handle_t *
vdev_blkdev_get_by_path(const char *path, spa_mode_t mode, void *holder)
{
#if defined(HAVE_BDEV_OPEN_BY_PATH)
	return (bdev_open_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder, NULL));
#elif defined(HAVE_BLKDEV_GET_BY_PATH_4ARG)
	return (blkdev_get_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder, NULL));
#else
	return (blkdev_get_by_path(path,
	    vdev_bdev_mode(mode, B_TRUE), holder));
#endif
}
static void
vdev_blkdev_put(zfs_bdev_handle_t *bdh, spa_mode_t mode, void *holder)
{
#if defined(HAVE_BDEV_RELEASE)
	return (bdev_release(bdh));
#elif defined(HAVE_BLKDEV_PUT_HOLDER)
	return (blkdev_put(BDH_BDEV(bdh), holder));
#else
	return (blkdev_put(BDH_BDEV(bdh),
	    vdev_bdev_mode(mode, B_TRUE)));
#endif
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	zfs_bdev_handle_t *bdh;
#ifdef HAVE_BLK_MODE_T
	blk_mode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#else
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa), B_FALSE);
#endif
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}
	/*
	 * Reopen the device if it is currently open.  When expanding a
	 * partition force re-scanning the partition table if userland
	 * did not take care of this already.  We need to do this while closed
	 * in order to get an accurate updated block device size.  Then
	 * since udev may need to recreate the device links increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdh = vd->vd_bdh;
		vd->vd_bdh = NULL;

		if (bdh) {
			struct block_device *bdev = BDH_BDEV(bdh);
			if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
				vdev_bdevname(bdev_whole(bdev), disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already.  We can detect this by
				 * comparing our current physical size
				 * with that of the device.  If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online.  We
				 * fallback to rescanning the partition
				 * table from the kernel below.  However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
		}

		if (reread_part) {
			bdh = vdev_blkdev_get_by_path(disk_name, mode,
			    zfs_vdev_holder);
			if (!BDH_IS_ERR(bdh)) {
				int error =
				    vdev_bdev_reread_part(BDH_BDEV(bdh));
				vdev_blkdev_put(bdh, mode, zfs_vdev_holder);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}
	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events.  This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point.  Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up.  In
	 * practice delays have been observed to be on the order of 100ms.
	 *
	 * When ERESTARTSYS is returned it indicates the block device is
	 * a zvol which could not be opened due to the deadlock detection
	 * logic in zvol_open().  Extend the timeout and retry the open;
	 * subsequent attempts are expected to eventually succeed.
	 */
	hrtime_t start = gethrtime();
	bdh = BDH_ERR_PTR(-ENXIO);
	while (BDH_IS_ERR(bdh) && ((gethrtime() - start) < timeout)) {
		bdh = vdev_blkdev_get_by_path(v->vdev_path, mode,
		    zfs_vdev_holder);
		if (unlikely(BDH_PTR_ERR(bdh) == -ENOENT)) {
			/*
			 * There is no point in waiting since the device
			 * was removed explicitly.
			 */
			if (v->vdev_removed)
				break;

			schedule_timeout(MSEC_TO_TICK(10));
		} else if (unlikely(BDH_PTR_ERR(bdh) == -ERESTARTSYS)) {
			timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
			continue;
		} else if (BDH_IS_ERR(bdh)) {
			break;
		}
	}

	if (BDH_IS_ERR(bdh)) {
		int error = -BDH_PTR_ERR(bdh);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdh = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdh = bdh;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}
	struct block_device *bdev = BDH_BDEV(vd->vd_bdh);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = bdev_discard_supported(bdev);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = bdev_secure_discard_supported(bdev);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(bdev));

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	return (0);
}
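
/*
 * For example (illustrative): a device reporting 512-byte logical and
 * 4096-byte physical sectors, with SPA_MINBLOCKSIZE = 512, yields
 *
 *	*logical_ashift  = highbit64(MAX(512, 512)) - 1  = 9
 *	*physical_ashift = highbit64(MAX(4096, 512)) - 1 = 12
 *
 * since highbit64() returns the 1-based position of the highest set bit.
 */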
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdh != NULL) {
		vdev_blkdev_put(vd->vd_bdh, spa_mode(v->vdev_spa),
		    zfs_vdev_holder);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	(void) submit_bio(bio);
#else
	(void) submit_bio(bio_data_dir(bio), bio);
#endif
}
/*
 * preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
	defined(CONFIG_PREEMPTION) && \
	defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif
/*
 * As of the Linux 5.18 kernel bio_alloc() expects a block_device struct
 * as an argument removing the need to set it with bio_set_dev().  This
 * removes the need for all of the following compatibility code.
 */
#if !defined(HAVE_BIO_ALLOC_4ARG)

#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only.  Define our
 * own version when needed which uses rcu_read_lock_sched().
 *
 * The Linux 5.17 kernel split linux/blk-cgroup.h into a private and a public
 * part, moving blkg_tryget into the private one.  Define our own version.
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY) || !defined(HAVE_BLKG_TRYGET)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#else
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol thus inadvertently converting
 * the entire macro.  Provide a minimal version which always assigns the
 * request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}

#define	bio_associate_blkg vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_set_dev		vdev_bio_set_dev
#endif
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */
#endif /* !HAVE_BIO_ALLOC_4ARG */
static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;

	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}
static inline struct bio *
vdev_bio_alloc(struct block_device *bdev, gfp_t gfp_mask,
    unsigned short nr_vecs)
{
	struct bio *bio;

#ifdef HAVE_BIO_ALLOC_4ARG
	bio = bio_alloc(bdev, nr_vecs, 0, gfp_mask);
#else
	bio = bio_alloc(gfp_mask, nr_vecs);
	if (likely(bio != NULL))
		bio_set_dev(bio, bdev);
#endif

	return (bio);
}
/*
 * This is the classic, battle-tested BIO submission code.
 *
 * These functions have been renamed to vdev_classic_* to make it clear what
 * they belong to, but their implementations are unchanged.
 */

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[];	/* Attached bio's */
} dio_request_t;
static dio_request_t *
vdev_classic_dio_alloc(int bio_count)
{
	dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (int i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}
static void
vdev_classic_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_classic_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}
static void
vdev_classic_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_classic_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}
}
BIO_END_IO_PROTO(vdev_classic_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by vdev_classic_physio */
	vdev_classic_dio_put(dr);
}
static inline unsigned int
vdev_classic_bio_max_segs(zio_t *zio, int bio_size, uint64_t abd_offset)
{
	unsigned long nr_segs = abd_nr_pages_off(zio->io_abd,
	    bio_size, abd_offset);

#ifdef HAVE_BIO_MAX_SEGS
	return (bio_max_segs(nr_segs));
#else
	return (MIN(nr_segs, BIO_MAX_PAGES));
#endif
}
static int
vdev_classic_physio(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = BDH_BDEV(vd->vd_bdh);
	size_t io_size = zio->io_size;
	uint64_t io_offset = zio->io_offset;
	int rw = zio->io_type == ZIO_TYPE_READ ? READ : WRITE;
	int flags = 0;

	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size;
	int bio_count = 16;
	int error = 0;
	struct blk_plug plug;
	unsigned short nr_vecs;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    (u_longlong_t)io_offset,
		    (u_longlong_t)io_size,
		    (u_longlong_t)i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}
retry:
	dr = vdev_classic_dio_alloc(bio_count);

	if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)) &&
	    zio->io_vd->vdev_failfast == B_TRUE) {
		bio_set_flags_failfast(bdev, &flags, zfs_vdev_failfast_mask & 1,
		    zfs_vdev_failfast_mask & 2, zfs_vdev_failfast_mask & 4);
	}

	dr->dr_zio = zio;

	/*
	 * Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
	 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
	 * can cover at least 128KB and at most 1MB.  When the required number
	 * of iovec's exceeds this, we are forced to break the IO in multiple
	 * bio's and wait for them all to complete.  This is likely if the
	 * recordsize property is increased beyond 1MB.  The default
	 * bio_count=16 should typically accommodate the maximum-size zio of
	 * 16MB.
	 */
	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (int i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * If additional bio's are required, we have to retry, but
		 * this should be rare - see the comment above.
		 */
		if (dr->dr_bio_count == i) {
			vdev_classic_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		nr_vecs = vdev_classic_bio_max_segs(zio, bio_size, abd_offset);
		dr->dr_bio[i] = vdev_bio_alloc(bdev, GFP_NOIO, nr_vecs);
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_classic_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_classic_physio_completion */
		vdev_classic_dio_get(dr);

		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_classic_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}
	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_classic_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (int i = 0; i < dr->dr_bio_count; i++) {
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);
	}

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	vdev_classic_dio_put(dr);

	return (error);
}
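
/*
 * Worked example (illustrative): a 2 MiB zio on a 4 KiB page kernel needs
 * up to 512 iovec's.  With BIO_MAX_PAGES = 256, vdev_classic_bio_max_segs()
 * caps each bio at 256 segments (1 MiB of pages), so the loop above builds
 * two bio's: abd_bio_map_off() returns the 1 MiB remainder after the first
 * bio and 0 after the second, which terminates the loop.
 */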
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = vdev_bio_alloc(bdev, GFP_NOIO, 0);
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}
#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
	defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
BIO_END_IO_PROTO(vdev_disk_discard_end_io, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif
	bio_put(bio);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
*zio
, unsigned long flags
)
903 struct bio
*bio
= NULL
;
905 #if defined(BLKDEV_DISCARD_SECURE)
906 ret
= - __blkdev_issue_discard(
907 BDH_BDEV(((vdev_disk_t
*)zio
->io_vd
->vdev_tsd
)->vd_bdh
),
908 zio
->io_offset
>> 9, zio
->io_size
>> 9, GFP_NOFS
, flags
, &bio
);
911 ret
= - __blkdev_issue_discard(
912 BDH_BDEV(((vdev_disk_t
*)zio
->io_vd
->vdev_tsd
)->vd_bdh
),
913 zio
->io_offset
>> 9, zio
->io_size
>> 9, GFP_NOFS
, &bio
);
916 bio
->bi_private
= zio
;
917 bio
->bi_end_io
= vdev_disk_discard_end_io
;
918 vdev_submit_bio(bio
);
static int
vdev_disk_io_trim(zio_t *zio)
{
	unsigned long trim_flags = 0;
	if (zio->io_trim_flags & ZIO_TRIM_SECURE) {
#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
		return (-blkdev_issue_secure_erase(
		    BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS));
#elif defined(BLKDEV_DISCARD_SECURE)
		trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
	}
#if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE) || \
	defined(HAVE_BLKDEV_ISSUE_DISCARD_ASYNC)
	return (vdev_issue_discard_trim(zio, trim_flags));
#elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
	return (-blkdev_issue_discard(
	    BDH_BDEV(((vdev_disk_t *)zio->io_vd->vdev_tsd)->vd_bdh),
	    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS, trim_flags));
#else
#error "Unsupported kernel"
#endif
}
int (*vdev_disk_io_rw_fn)(zio_t *zio) = NULL;
static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state.  Nothing to be done here but return failure.
	 */
	if (vd->vd_bdh == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}
->io_type
) {
984 if (!vdev_readable(v
)) {
985 rw_exit(&vd
->vd_lock
);
986 zio
->io_error
= SET_ERROR(ENXIO
);
991 switch (zio
->io_cmd
) {
992 case DKIOCFLUSHWRITECACHE
:
994 if (zfs_nocacheflush
)
997 if (v
->vdev_nowritecache
) {
998 zio
->io_error
= SET_ERROR(ENOTSUP
);
1002 error
= vdev_disk_io_flush(BDH_BDEV(vd
->vd_bdh
), zio
);
1004 rw_exit(&vd
->vd_lock
);
1008 zio
->io_error
= error
;
1013 zio
->io_error
= SET_ERROR(ENOTSUP
);
1016 rw_exit(&vd
->vd_lock
);
1021 zio
->io_error
= vdev_disk_io_trim(zio
);
1022 rw_exit(&vd
->vd_lock
);
1023 #if defined(HAVE_BLKDEV_ISSUE_SECURE_ERASE)
1024 if (zio
->io_trim_flags
& ZIO_TRIM_SECURE
)
1026 #elif defined(HAVE_BLKDEV_ISSUE_DISCARD)
1032 case ZIO_TYPE_WRITE
:
1033 zio
->io_target_timestamp
= zio_handle_io_delay(zio
);
1034 error
= vdev_disk_io_rw_fn(zio
);
1035 rw_exit(&vd
->vd_lock
);
1037 zio
->io_error
= error
;
1044 * Getting here means our parent vdev has made a very strange
1045 * request of us, and shouldn't happen. Assert here to force a
1046 * crash in dev builds, but in production return the IO
1047 * unhandled. The pool will likely suspend anyway but that's
1048 * nicer than crashing the kernel.
1050 ASSERT3S(zio
->io_type
, ==, -1);
1052 rw_exit(&vd
->vd_lock
);
1053 zio
->io_error
= SET_ERROR(ENOTSUP
);
1058 __builtin_unreachable();
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (!zfs_check_disk_status(BDH_BDEV(vd->vd_bdh))) {
			invalidate_bdev(BDH_BDEV(vd->vd_bdh));
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
/*
 * At first vdev use, set the submission function to the default if it
 * hasn't been set already.
 */
static int
vdev_disk_init(spa_t *spa, nvlist_t *nv, void **tsd)
{
	(void) spa;
	(void) nv;
	(void) tsd;

	if (vdev_disk_io_rw_fn == NULL)
		vdev_disk_io_rw_fn = vdev_classic_physio;

	return (0);
}
vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = vdev_disk_init,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE,			/* leaf vdev */
	.vdev_op_kobj_evt_post = vdev_disk_kobj_evt_post
};
/*
 * The zfs_vdev_scheduler module option has been deprecated.  Setting this
 * value no longer has any effect.  It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file.  The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

static const char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint_t val;
	int error;

	error = kstrtouint(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_uint(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint_t val;
	int error;

	error = kstrtouint(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_uint(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, open_timeout_ms, UINT, ZMOD_RW,
	"Timeout before determining that a device is missing");

ZFS_MODULE_PARAM(zfs_vdev, zfs_vdev_, failfast_mask, UINT, ZMOD_RW,
	"Defines failfast mask: 1 - device, 2 - transport, 4 - driver");