/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
#include <sys/zfs_context.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/sunldi.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return (part->nr_sects << 9);

	/* Otherwise assume the full device capacity */
	return (get_capacity(bdev->bd_disk) << 9);
}
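/*
 * Note on bdev_capacity() above: nr_sects and get_capacity() both count
 * 512-byte sectors, so shifting left by 9 converts the result to bytes
 * (e.g. 2097152 sectors << 9 = 1 GiB).
 */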
static void
vdev_disk_error(zio_t *zio)
{
	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags, (u_longlong_t)zio->io_delay);
}
/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
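/*
 * Note: the elevator name used below comes from the zfs_vdev_scheduler
 * module parameter declared at the end of this file; for example, loading
 * the module with zfs_vdev_scheduler=noop selects the 'noop' elevator.
 */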
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = vd->vd_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char *device = bdev->bd_disk->disk_name;
	int error;

	/* Skip devices which are not whole disks (partitions) */
	if (!v->vdev_wholedisk)
		return (0);

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return (0);

	/* Leave existing scheduler when set to "none" */
	if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
		return (0);
#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
	/* For pre-2.6.36 kernels elevator_change() is not available.
	 * Therefore we fall back to using a usermodehelper to echo the
	 * elevator into sysfs.  This requires /bin/echo and sysfs to be
	 * mounted, which may not be true early in the boot process.
	 */
# define SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"
	{
		char *argv[] = { "/bin/sh", "-c", NULL, NULL };
		char *envp[] = { NULL };

		argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
		error = call_usermodehelper(argv[0], argv, envp, 1);
		strfree(argv[2]);
	}
#endif /* HAVE_ELEVATOR_CHANGE */
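	/*
	 * Illustrative example of the fallback path above: with device
	 * "sda" and elevator "noop" the usermodehelper runs a command
	 * equivalent to "echo noop >/sys/block/sda/queue/scheduler".
	 */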
	if (error)
		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);

	return (error);
}
/*
 * Expanding a whole disk vdev involves invoking BLKRRPART on the
 * whole disk device.  This poses a problem, because BLKRRPART will
 * return EBUSY if one of the disk's partitions is open.  That's why
 * we have to do it here, just before opening the data partition.
 * Unfortunately, BLKRRPART works by dropping all partitions and
 * recreating them, which means that for a short time window, all
 * /dev/sdxN device files disappear (until udev recreates them).
 * This means two things:
 *  - When we open the data partition just after a BLKRRPART, we
 *    can't do it using the normal device file path because of the
 *    obvious race condition with udev.  Instead, we use reliable
 *    kernel APIs to get a handle to the new partition device from
 *    the whole disk device.
 *  - Because vdev_disk_open() initially needs to find the device
 *    using its path, multiple vdev_disk_open() invocations in
 *    short succession on the same disk with BLKRRPARTs in the
 *    middle have a high probability of failure (because of the
 *    race condition with udev).  A typical situation where this
 *    might happen is when the zpool userspace tool does a
 *    TRYIMPORT immediately followed by an IMPORT.  For this
 *    reason, we only invoke BLKRRPART in the module when strictly
 *    necessary (zpool online -e case), and rely on userspace to
 *    do it when possible.
 */
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
	struct block_device *bdev, *result = ERR_PTR(-ENXIO);
	struct gendisk *disk;
	int error, partno;

	bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), vd);
	if (IS_ERR(bdev))
		return bdev;

	disk = get_gendisk(bdev->bd_dev, &partno);
	vdev_bdev_close(bdev, vdev_bdev_mode(mode));

	if (disk) {
		bdev = bdget(disk_devt(disk));
		if (bdev) {
			error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
			if (error == 0)
				error = ioctl_by_bdev(bdev, BLKRRPART, 0);
			vdev_bdev_close(bdev, vdev_bdev_mode(mode));
		}

		bdev = bdget_disk(disk, partno);
		if (bdev) {
			error = blkdev_get(bdev,
			    vdev_bdev_mode(mode) | FMODE_EXCL, vd);
			if (error == 0)
				result = bdev;
		}
		put_disk(disk);
	}

	return result;
#else
	return ERR_PTR(-EOPNOTSUPP);
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev = ERR_PTR(-ENXIO);
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternately, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
	mode = spa_mode(v->vdev_spa);
	if (v->vdev_wholedisk && v->vdev_expanding)
		bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
	if (IS_ERR(bdev))
		bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;
	block_size = vdev_bdev_block_size(bdev);

	/* We think the wholedisk property should always be set when this
	 * function is called.  ASSERT here so if any legitimate cases exist
	 * where it's not set, we'll find them during debugging.  If we never
	 * hit the ASSERT, this and the following conditional statement can be
	 * removed. */
	ASSERT3S(v->vdev_wholedisk, !=, -1ULL);

	/* The wholedisk property was initialized to -1 in vdev_alloc() if it
	 * was unspecified.  In that case, check if this is a whole device.
	 * When bdev->bd_contains == bdev we have a whole device and not simply
	 * a partition. */
	if (v->vdev_wholedisk == -1ULL)
		v->vdev_wholedisk = (bdev->bd_contains == bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Physical volume size in bytes */
	*psize = bdev_capacity(bdev);

	/* TODO: report possible expansion size */
	*max_psize = *psize;

	/* Based on the minimum sector size set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
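	/*
	 * For example, a device reporting a 4096-byte logical block size
	 * yields highbit(4096) - 1 = 12, i.e. an ashift of 12 (2^12 bytes),
	 * while a 512-byte device falls back to SPA_MINBLOCKSIZE and an
	 * ashift of 9.
	 */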
	/* Try to set the io scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return 0;
}
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof(vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}
static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
	/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
	return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
	/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
	return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
	/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
	return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_delay = jiffies_to_msecs(
			    jiffies_64 - zio->io_delay);
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
	/* Old 3-argument bio_end_io_t: wait until all bytes are accounted */
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */
	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up the synchronous waiter if this is the last outstanding bio */
	if ((rc == 1) && vdev_disk_dio_is_sync(dr))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr  += size;
		bio_size -= size;
		offset    = 0;
	}

	/* Any bytes which could not be mapped are returned to the caller */
	return bio_size;
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;

	ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;
	dr->dr_rw = flags;
	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO into multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
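	/*
	 * Illustrative example: a 1 MiB request against a queue that caps
	 * each bio at 128 KiB (numbers are hypothetical) would be built as
	 * eight bio's attached to this single dio_request.
	 */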
	bio_ptr    = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size   = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {
		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			printk("WARNING: Resized bio's/dio to %d\n", bio_count);
			goto retry;
		}
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}
		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset >> 9;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;

		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr    += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}
	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);
	if (zio)
		zio->io_delay = jiffies_64;

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);
	/*
	 * On synchronous blocking requests we wait for all bio completion
	 * callbacks to run.  We will be woken when the last callback runs
	 * for this dio.  We are responsible for putting the last dio_request
	 * reference, which in turn puts back the last bio references.  The
	 * only synchronous consumer is vdev_disk_read_rootlabel(); all other
	 * IO originating from vdev_disk_io_start() is asynchronous.
	 */
	if (vdev_disk_dio_is_sync(dr)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void)vdev_disk_dio_put(dr);

	return error;
}
static int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}
/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);
	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	zio->io_error = -rc;
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	zio->io_delay = jiffies_64;
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

	return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = ENXIO;
			return ZIO_PIPELINE_CONTINUE;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;

			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;

		default:
			zio->io_error = ENOTSUP;
		}

		return ZIO_PIPELINE_CONTINUE;

	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = ENOTSUP;
		return ZIO_PIPELINE_CONTINUE;
	}

	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	*config = NULL;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);

	s = bdev_capacity(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}

	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
	label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);

	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return 0;
}
module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");