/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
        struct completion       dr_comp;        /* Completion for sync IO */
        zio_t                   *dr_zio;        /* Parent ZIO */
        atomic_t                dr_ref;         /* References */
        int                     dr_wait;        /* Wait for IO */
        int                     dr_error;       /* Bio error */
        int                     dr_bio_count;   /* Count of bio's */
        struct bio              *dr_bio[0];     /* Attached bio's */
} dio_request_t;
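/*
 * Note: dr_bio[0] above is a zero-length trailing array, so a
 * dio_request and its bio pointer table come from one allocation
 * whose size is fixed at run time (see vdev_disk_dio_alloc()):
 *
 *      dr = kmem_zalloc(sizeof (dio_request_t) +
 *          sizeof (struct bio *) * bio_count, KM_SLEEP);
 */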
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
        fmode_t mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if (smode & FREAD)
                mode |= FMODE_READ;

        if (smode & FWRITE)
                mode |= FMODE_WRITE;

        return (mode);
}
#else
static int
vdev_bdev_mode(int smode)
{
        int mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if ((smode & FREAD) && !(smode & FWRITE))
                mode = 1;

        return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
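/*
 * Returns the usable device capacity in bytes.  Both part->nr_sects
 * and get_capacity() count 512-byte sectors, hence the << 9
 * conversions below.
 */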
static uint64_t
bdev_capacity(struct block_device *bdev)
{
        struct hd_struct *part = bdev->bd_part;

        /* The partition capacity referenced by the block device */
        if (part)
                return (part->nr_sects << 9);

        /* Otherwise assume the full device capacity */
        return (get_capacity(bdev->bd_disk) << 9);
}
static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
        printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
            "flags=%x\n", zio->io_error, zio->io_type,
            (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
            zio->io_flags);
#endif
}
/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
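/*
 * The elevator is selected via the zfs_vdev_scheduler module option
 * declared above and registered at the end of this file.  For
 * example (one illustrative way to set it at module load time):
 *
 *      modprobe zfs zfs_vdev_scheduler=noop
 */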
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
        vdev_disk_t *vd = v->vdev_tsd;
        struct block_device *bdev = vd->vd_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char *device = bdev->bd_disk->disk_name;
        int error;
        /*
         * Skip devices which are not whole disks (partitions).
         * Device-mapper devices are excepted since they may be whole
         * disks despite the vdev_wholedisk flag, in which case we can
         * and should switch the elevator.  If the device-mapper device
         * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
         * "Skip devices without schedulers" check below will fail.
         */
        if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
                return (0);
        /* Skip devices without schedulers (loop, ram, dm, etc) */
        if (!q->elevator || !blk_queue_stackable(q))
                return (0);

        /* Leave existing scheduler when set to "none" */
        if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
                return (0);
#ifdef HAVE_ELEVATOR_CHANGE
        error = elevator_change(q, elevator);
#else
        /*
         * For pre-2.6.36 kernels elevator_change() is not available.
         * Therefore we fall back to using a usermodehelper to echo the
         * elevator into sysfs.  This requires /bin/echo and sysfs to be
         * mounted, which may not be true early in the boot process.
         */
#define SET_SCHEDULER_CMD \
        "exec 0</dev/null " \
        "     1>/sys/block/%s/queue/scheduler " \
        "     2>/dev/null; " \
        "echo %s"
        {
                char *argv[] = { "/bin/sh", "-c", NULL, NULL };
                char *envp[] = { NULL };

                argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
                error = call_usermodehelper(argv[0], argv, envp,
                    UMH_WAIT_PROC);
                strfree(argv[2]);
        }
#endif /* HAVE_ELEVATOR_CHANGE */
        if (error)
                printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
                    elevator, v->vdev_path, device, error);

        return (error);
}
/*
 * Expanding a whole disk vdev involves invoking BLKRRPART on the
 * whole disk device.  This poses a problem, because BLKRRPART will
 * return EBUSY if one of the disk's partitions is open.  That's why
 * we have to do it here, just before opening the data partition.
 * Unfortunately, BLKRRPART works by dropping all partitions and
 * recreating them, which means that for a short time window, all
 * /dev/sdxN device files disappear (until udev recreates them).
 * This means two things:
 *  - When we open the data partition just after a BLKRRPART, we
 *    can't do it using the normal device file path because of the
 *    obvious race condition with udev.  Instead, we use reliable
 *    kernel APIs to get a handle to the new partition device from
 *    the whole disk device.
 *  - Because vdev_disk_open() initially needs to find the device
 *    using its path, multiple vdev_disk_open() invocations in
 *    short succession on the same disk with BLKRRPARTs in the
 *    middle have a high probability of failure (because of the
 *    race condition with udev).  A typical situation where this
 *    might happen is when the zpool userspace tool does a
 *    TRYIMPORT immediately followed by an IMPORT.  For this
 *    reason, we only invoke BLKRRPART in the module when strictly
 *    necessary (zpool online -e case), and rely on userspace to
 *    do it when possible.
 */
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
        struct block_device *bdev, *result = ERR_PTR(-ENXIO);
        struct gendisk *disk;
        int error, partno;

        bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);
        if (IS_ERR(bdev))
                return (bdev);

        disk = get_gendisk(bdev->bd_dev, &partno);
        vdev_bdev_close(bdev, vdev_bdev_mode(mode));

        if (disk) {
                bdev = bdget(disk_devt(disk));
                if (bdev) {
                        error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
                        if (error == 0)
                                error = ioctl_by_bdev(bdev, BLKRRPART, 0);
                        vdev_bdev_close(bdev, vdev_bdev_mode(mode));
                }

                bdev = bdget_disk(disk, partno);
                if (bdev) {
                        error = blkdev_get(bdev,
                            vdev_bdev_mode(mode) | FMODE_EXCL, vd);
                        if (error == 0)
                                result = bdev;
                }
                put_disk(disk);
        }

        return (result);
#else
        return (ERR_PTR(-EOPNOTSUPP));
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
        struct block_device *bdev = ERR_PTR(-ENXIO);
        vdev_disk_t *vd;
        int count = 0, mode, block_size;
        /* Must have a pathname and it must be absolute. */
        if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
                v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (SET_ERROR(EINVAL));
        }
        /*
         * Reopen the device if it's not currently open.  Otherwise,
         * just update the physical size of the device.
         */
        if (v->vdev_tsd != NULL) {
                ASSERT(v->vdev_reopening);
                vd = v->vdev_tsd;
                goto skip_open;
        }
        vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
        if (vd == NULL)
                return (SET_ERROR(ENOMEM));
        /*
         * Devices are always opened by the path provided at configuration
         * time.  This means that if the provided path is a udev by-id path
         * then drives may be recabled without an issue.  If the provided
         * path is a udev by-path path, then the physical location information
         * will be preserved.  This can be critical for more complicated
         * configurations where drives are located in specific physical
         * locations to maximize the system's tolerance to component failure.
         * Alternatively, you can provide your own udev rule to flexibly map
         * the drives as you see fit.  It is not advised that you use the
         * /dev/[hd]d devices which may be reordered due to probing order.
         * Devices in the wrong locations will be detected by the higher
         * level vdev validation.
         *
         * The specified paths may be briefly removed and recreated in
         * response to udev events.  This should be exceptionally unlikely
         * because the zpool command makes every effort to verify these paths
         * have already settled prior to reaching this point.  Therefore,
         * an ENOENT failure at this point is highly likely to be transient
         * and it is reasonable to sleep and retry before giving up.  In
         * practice delays have been observed to be on the order of 100ms.
         */
        mode = spa_mode(v->vdev_spa);
        if (v->vdev_wholedisk && v->vdev_expanding)
                bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
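        /*
         * Retry the open if the device node is momentarily absent; the
         * loop below allows up to 50 attempts 10 ms apart, i.e. roughly
         * a 500 ms worst-case wait for udev to recreate the path.
         */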
        while (IS_ERR(bdev) && count < 50) {
                bdev = vdev_bdev_open(v->vdev_path,
                    vdev_bdev_mode(mode), zfs_vdev_holder);
                if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
                        msleep(10);
                        count++;
                } else if (IS_ERR(bdev)) {
                        break;
                }
        }

        if (IS_ERR(bdev)) {
                dprintf("failed open v->vdev_path=%s, error=%d count=%d\n",
                    v->vdev_path, -PTR_ERR(bdev), count);
                kmem_free(vd, sizeof (vdev_disk_t));
                return (SET_ERROR(-PTR_ERR(bdev)));
        }

        v->vdev_tsd = vd;
        vd->vd_bdev = bdev;

skip_open:
        /* Determine the physical block size */
        block_size = vdev_bdev_block_size(vd->vd_bdev);

        /* Clear the nowritecache bit, causes vdev_reopen() to try again. */
        v->vdev_nowritecache = B_FALSE;

        /* Inform the ZIO pipeline that we are non-rotational */
        v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));

        /* Physical volume size in bytes */
        *psize = bdev_capacity(vd->vd_bdev);

        /* TODO: report possible expansion size */
        *max_psize = *psize;

        /* Based on the minimum sector size set the block size */
        *ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

        /* Try to set the io scheduler elevator algorithm */
        (void) vdev_elevator_switch(v, zfs_vdev_scheduler);

        return (0);
}
static void
vdev_disk_close(vdev_t *v)
{
        vdev_disk_t *vd = v->vdev_tsd;

        if (v->vdev_reopening || vd == NULL)
                return;

        if (vd->vd_bdev != NULL)
                vdev_bdev_close(vd->vd_bdev,
                    vdev_bdev_mode(spa_mode(v->vdev_spa)));

        kmem_free(vd, sizeof (vdev_disk_t));
        v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
        dio_request_t *dr;
        int i;

        dr = kmem_zalloc(sizeof (dio_request_t) +
            sizeof (struct bio *) * bio_count, KM_SLEEP);
        if (dr) {
                init_completion(&dr->dr_comp);
                atomic_set(&dr->dr_ref, 0);
                dr->dr_bio_count = bio_count;
                dr->dr_error = 0;

                for (i = 0; i < dr->dr_bio_count; i++)
                        dr->dr_bio[i] = NULL;
        }

        return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
        int i;

        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        bio_put(dr->dr_bio[i]);

        kmem_free(dr, sizeof (dio_request_t) +
            sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
        atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
        int rc = atomic_dec_return(&dr->dr_ref);

        /*
         * Free the dio_request when the last reference is dropped and
         * ensure zio_interpret is called only once with the correct zio
         */
        if (rc == 0) {
                zio_t *zio = dr->dr_zio;
                int error = dr->dr_error;

                vdev_disk_dio_free(dr);

                if (zio) {
                        zio->io_error = error;
                        ASSERT3S(zio->io_error, >=, 0);
                        if (zio->io_error)
                                vdev_disk_error(zio);
                        zio_interrupt(zio);
                }
        }

        return (rc);
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
        dio_request_t *dr = bio->bi_private;
        int rc;
        int wait;

        if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
                dr->dr_error = -(bio->bi_error);
#else
                if (error)
                        dr->dr_error = -(error);
                else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
                        dr->dr_error = EIO;
#endif
        }

        wait = dr->dr_wait;

        /* Drop reference acquired by __vdev_disk_physio */
        rc = vdev_disk_dio_put(dr);

        /* Wake up synchronous waiter if this is the last outstanding bio */
        if (wait && rc == 1)
                complete(&dr->dr_comp);
}
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
        return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
            PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
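/*
 * Worked example (assuming 4 KiB pages): a buffer starting 512 bytes
 * into a page with bio_size = 8192 ends 512 bytes into a third page,
 * so bio_nr_pages() returns 3, not 8192/4096 = 2.  The rounding above
 * counts every page the buffer touches.
 */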
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
        unsigned int offset, size, i;
        struct page *page;

        offset = offset_in_page(bio_ptr);
        for (i = 0; i < bio->bi_max_vecs; i++) {
                size = PAGE_SIZE - offset;

                if (bio_size <= 0)
                        break;

                if (size > bio_size)
                        size = bio_size;

                if (is_vmalloc_addr(bio_ptr))
                        page = vmalloc_to_page(bio_ptr);
                else
                        page = virt_to_page(bio_ptr);

                /*
                 * Some network related block devices use tcp_sendpage,
                 * which doesn't behave well when given a 0-count page;
                 * this is a safety net to catch them.
                 */
                ASSERT3S(page_count(page), >, 0);

                if (bio_add_page(bio, page, size, offset) != size)
                        break;

                bio_ptr += size;
                bio_size -= size;
                offset = 0;
        }

        return (bio_size);
}
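/*
 * Temporarily clearing current->bio_tail (or current->bio_list on
 * newer kernels) around submit_bio() below appears intended to bypass
 * the bio queuing done by generic_make_request() for nested
 * submissions, forcing the bio to be dispatched immediately.
 */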
static inline void
vdev_submit_bio(int rw, struct bio *bio)
{
#ifdef HAVE_CURRENT_BIO_TAIL
        struct bio **bio_tail = current->bio_tail;
        current->bio_tail = NULL;
        submit_bio(rw, bio);
        current->bio_tail = bio_tail;
#else
        struct bio_list *bio_list = current->bio_list;
        current->bio_list = NULL;
        submit_bio(rw, bio);
        current->bio_list = bio_list;
#endif
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags, int wait)
{
        dio_request_t *dr;
        caddr_t bio_ptr;
        uint64_t bio_offset;
        int rw, bio_size, bio_count = 16;
        int i = 0, error = 0;

        ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
        dr = vdev_disk_dio_alloc(bio_count);
        if (dr == NULL)
                return (ENOMEM);

        if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
                bio_set_flags_failfast(bdev, &flags);

        rw = flags;
        dr->dr_zio = zio;
        dr->dr_wait = wait;

        /*
         * When the IO size exceeds the maximum bio size for the request
         * queue we are forced to break the IO in multiple bio's and wait
         * for them all to complete.  Ideally, all pool users will set
         * their volume block size to match the maximum request size and
         * the common case will be one bio per vdev IO request.
         */
        bio_ptr = kbuf_ptr;
        bio_offset = kbuf_offset;
        bio_size = kbuf_size;
        for (i = 0; i <= dr->dr_bio_count; i++) {

                /* Finished constructing bio's for given buffer */
                if (bio_size <= 0)
                        break;

                /*
                 * By default only 'bio_count' bio's per dio are allowed.
                 * However, if we find ourselves in a situation where more
                 * are needed we allocate a larger dio and warn the user.
                 */
                if (dr->dr_bio_count == i) {
                        vdev_disk_dio_free(dr);
                        bio_count *= 2;
                        goto retry;
                }

                /* bio_alloc() with __GFP_WAIT never returns NULL */
                dr->dr_bio[i] = bio_alloc(GFP_NOIO,
                    MIN(bio_nr_pages(bio_ptr, bio_size), BIO_MAX_PAGES));
                if (unlikely(dr->dr_bio[i] == NULL)) {
                        vdev_disk_dio_free(dr);
                        return (ENOMEM);
                }

                /* Matching put called by vdev_disk_physio_completion */
                vdev_disk_dio_get(dr);

                dr->dr_bio[i]->bi_bdev = bdev;
                BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
                dr->dr_bio[i]->bi_rw = rw;
                dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
                dr->dr_bio[i]->bi_private = dr;

                /* Remaining size is returned to become the new size */
                bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

                /* Advance in buffer and construct another bio if needed */
                bio_ptr += BIO_BI_SIZE(dr->dr_bio[i]);
                bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
        }

        /* Extra reference to protect dio_request during vdev_submit_bio */
        vdev_disk_dio_get(dr);

        /* Submit all bio's associated with this dio */
        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        vdev_submit_bio(rw, dr->dr_bio[i]);

        /*
         * On synchronous blocking requests we wait for all of the bio
         * completion callbacks to run.  We will be woken when the last
         * callback runs for this dio.  We are responsible for putting
         * the last dio_request reference, which in turn puts back the
         * last bio references.  The only synchronous consumer is
         * vdev_disk_read_rootlabel(); all other IO originating from
         * vdev_disk_io_start() is asynchronous.
         */
        if (wait) {
                wait_for_completion(&dr->dr_comp);
                error = dr->dr_error;
                ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
        }

        (void) vdev_disk_dio_put(dr);

        return (error);
}
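/*
 * Synchronous wrapper around __vdev_disk_physio(); the final argument
 * (wait = 1) blocks until all attached bio's have completed.  Used by
 * vdev_disk_read_rootlabel() below.
 */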
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
        bio_set_flags_failfast(bdev, &flags);
        return (__vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags, 1));
}
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, rc)
{
        zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
        int rc = bio->bi_error;
#endif

        zio->io_error = -rc;
        if (rc && (rc == -EOPNOTSUPP))
                zio->io_vd->vdev_nowritecache = B_TRUE;

        bio_put(bio);
        ASSERT3S(zio->io_error, >=, 0);
        if (zio->io_error)
                vdev_disk_error(zio);
        zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        struct request_queue *q;
        struct bio *bio;

        q = bdev_get_queue(bdev);
        if (!q)
                return (ENXIO);

        bio = bio_alloc(GFP_NOIO, 0);
        /* bio_alloc() with __GFP_WAIT never returns NULL */
        if (unlikely(bio == NULL))
                return (ENOMEM);

        bio->bi_end_io = vdev_disk_io_flush_completion;
        bio->bi_private = zio;
        bio->bi_bdev = bdev;
        vdev_submit_bio(VDEV_WRITE_FLUSH_FUA, bio);
        invalidate_bdev(bdev);

        return (0);
}
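/*
 * Note: vdev_disk_io_flush() issues an empty bio (zero data pages), so
 * the request acts purely as a write-cache barrier.  VDEV_WRITE_FLUSH_FUA
 * is a compatibility macro which is expected to map to the kernel's
 * flush/FUA write flags on supported kernels.
 */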
static void
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *v = zio->io_vd;
        vdev_disk_t *vd = v->vdev_tsd;
        zio_priority_t pri = zio->io_priority;
        int flags, error;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:

                if (!vdev_readable(v)) {
                        zio->io_error = SET_ERROR(ENXIO);
                        zio_interrupt(zio);
                        return;
                }

                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (v->vdev_nowritecache) {
                                zio->io_error = SET_ERROR(ENOTSUP);
                                break;
                        }

                        error = vdev_disk_io_flush(vd->vd_bdev, zio);
                        if (error == 0)
                                return;

                        zio->io_error = error;
                        if (error == ENOTSUP)
                                v->vdev_nowritecache = B_TRUE;

                        break;

                default:
                        zio->io_error = SET_ERROR(ENOTSUP);
                }

                zio_execute(zio);
                return;

        case ZIO_TYPE_WRITE:
                if ((pri == ZIO_PRIORITY_SYNC_WRITE) && (v->vdev_nonrot))
                        flags = WRITE_SYNC;
                else
                        flags = WRITE;
                break;

        case ZIO_TYPE_READ:
                if ((pri == ZIO_PRIORITY_SYNC_READ) && (v->vdev_nonrot))
                        flags = READ_SYNC;
                else
                        flags = READ;
                break;

        default:
                zio->io_error = SET_ERROR(ENOTSUP);
                zio_interrupt(zio);
                return;
        }

        error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
            zio->io_size, zio->io_offset, flags, 0);

        if (error) {
                zio->io_error = error;
                zio_interrupt(zio);
        }
}
static void
vdev_disk_io_done(zio_t *zio)
{
        /*
         * If the device returned EIO, we revalidate the media.  If it is
         * determined the media has changed this triggers the asynchronous
         * removal of the device from the configuration.
         */
        if (zio->io_error == EIO) {
                vdev_t *v = zio->io_vd;
                vdev_disk_t *vd = v->vdev_tsd;

                if (check_disk_change(vd->vd_bdev)) {
                        vdev_bdev_invalidate(vd->vd_bdev);
                        v->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                }
        }
}
static void
vdev_disk_hold(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* We must have a pathname, and it must be absolute. */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
                return;

        /*
         * Only prefetch path and devid info if the device has
         * never been opened.
         */
        if (vd->vdev_tsd != NULL)
                return;

        /* XXX: Implement me as a vnode lookup for the device */
        vd->vdev_name_vp = NULL;
        vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,
        vdev_disk_hold,
        vdev_disk_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
        struct block_device *bdev;
        vdev_label_t *label;
        uint64_t s, size;
        int i;

        bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), zfs_vdev_holder);
        if (IS_ERR(bdev))
                return (-PTR_ERR(bdev));

        s = bdev_capacity(bdev);
        if (s == 0) {
                vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
                return (EIO);
        }

        size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
        label = vmem_alloc(sizeof (vdev_label_t), KM_SLEEP);
        *config = NULL;
        for (i = 0; i < VDEV_LABELS; i++) {
                uint64_t offset, state, txg = 0;

                /* read vdev label */
                offset = vdev_label_offset(size, i, 0);
                if (vdev_disk_physio(bdev, (caddr_t)label,
                    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state >= POOL_STATE_DESTROYED) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        vmem_free(label, sizeof (vdev_label_t));
        vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

        return (0);
}
, charp
, 0644);
864 MODULE_PARM_DESC(zfs_vdev_scheduler
, "I/O scheduler");