/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * Copyright (c) 2012, 2018 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/mod_compat.h>
#include <linux/msdos_fs.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
/* size of the "reserved" partition, in blocks */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[0];	/* Attached bio's */
} dio_request_t;
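/*
 * Illustrative note: dr_bio[] is a flexible array member, so a
 * dio_request_t and its bio pointers occupy one contiguous allocation.
 * For example, tracking 16 bio's requires a single allocation of
 * sizeof (dio_request_t) + 16 * sizeof (struct bio *) bytes, exactly
 * the sizing used by vdev_disk_dio_alloc() below.
 */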
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return (mode);
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = SB_RDONLY;

	return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
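/*
 * Illustrative example: spa_mode() for a writable pool includes both
 * FREAD and FWRITE, which the fmode_t variant above maps to
 * (FMODE_READ | FMODE_WRITE).  A read-only open maps to FMODE_READ in
 * the first variant and to SB_RDONLY in the second.
 */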
/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}
/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity, or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'.  efi_use_whole_disk() is
 * responsible for verifying the expected partition layout in the wholedisk
 * case, and updating the partition table if appropriate.  Once the partition
 * size has been increased the additional capacity will be visible using
 * bdev_capacity().
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions.  Over-reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 */
		available = i_size_read(bdev->bd_contains->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		if (available > 0)
			psize = available;
		else
			psize = bdev_capacity(bdev);
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
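/*
 * Worked example (NEW_START_BLOCK and PARTITION_END_ALIGNMENT are
 * assumed here to be 2048 sectors each; actual values may differ):
 *
 *	(16384 + 2048 + 2048) << 9 == 10,485,760 bytes (10 MiB)
 *
 * i.e. roughly 10 MiB of a wholedisk device is withheld from the
 * reported expansion capacity to cover the EFI reserved partition
 * and partition alignment.
 */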
static void
vdev_disk_error(zio_t *zio)
{
	zfs_dbgmsg(KERN_WARNING "zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}
/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
static void
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct request_queue *q;
	char *device;
	int error;

	for (int c = 0; c < v->vdev_children; c++)
		vdev_elevator_switch(v->vdev_child[c], elevator);

	if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
		return;

	q = bdev_get_queue(vd->vd_bdev);
	device = vd->vd_bdev->bd_disk->disk_name;

	/*
	 * Skip devices which are not whole disks (partitions).
	 * Device-mapper devices are excepted since they may be whole
	 * disks despite the vdev_wholedisk flag, in which case we can
	 * and should switch the elevator.  If the device-mapper device
	 * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
	 * "Skip devices without schedulers" check below will fail.
	 */
	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
		return;

	/* Leave existing scheduler when set to "none" */
	if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
		return;

	/*
	 * The elevator_change() function was available in kernels from
	 * 2.6.36 to 4.11.  When not available fall back to using the user
	 * mode helper functionality to set the elevator via sysfs.  This
	 * requires /bin/echo and sysfs to be mounted which may not be true
	 * early in the boot process.
	 */
#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
#define	SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };

	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error) {
		zfs_dbgmsg("Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);
	}
}
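/*
 * The usermode helper above amounts to running, for example:
 *
 *	echo noop >/sys/block/sda/queue/scheduler
 *
 * where "sda" and "noop" stand in for the disk_name and elevator
 * arguments supplied by the caller.
 */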
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	int count = 0, block_size;
	int bdev_retry_count = 50;
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open.  When expanding a
	 * partition force re-scanning the partition table while closed
	 * in order to get an accurate updated block device size.  Then,
	 * since udev may need to recreate the device links, increase the
	 * open retry count before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev->bd_contains) {
				bdevname(bdev->bd_contains, disk_name + 5);
				reread_part = B_TRUE;
			}

			vdev_bdev_close(bdev, mode);
		}

		if (reread_part) {
			bdev = vdev_bdev_open(disk_name, mode,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				vdev_bdev_close(bdev, mode);
				if (error == 0)
					bdev_retry_count = 100;
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events.  This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point.  Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up.  In
	 * practice delays have been observed to be on the order of 100ms.
	 */
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && count < bdev_retry_count) {
		bdev = vdev_bdev_open(v->vdev_path, mode, zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
			count++;
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d count=%d\n", error, count);
		vd->vd_bdev = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdev = bdev;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}

	/* Determine the physical block size */
	block_size = vdev_bdev_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size set the block size */
	*ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

	/* Try to set the io scheduler elevator algorithm */
	vdev_elevator_switch(v, zfs_vdev_scheduler);

	return (0);
}
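/*
 * Illustrative timing: each transient ENOENT retry above sleeps ~10ms
 * (MSEC_TO_TICK(10)), so the default bdev_retry_count of 50 gives up
 * after roughly 0.5s, and the expanded count of 100 (used after a
 * partition rescan) after roughly 1s, plus scheduling overhead.
 * Likewise, a 4096-byte physical sector yields an ashift of
 * highbit64(4096) - 1 == 12.
 */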
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}
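/*
 * Reference protocol: __vdev_disk_physio() takes one reference per
 * attached bio (each dropped in vdev_disk_physio_completion) plus an
 * extra reference held across submission, so the dio_request_t and
 * its zio cannot be completed or freed while bio's are still being
 * issued.
 */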
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);
}
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		/*
		 * Some network-related block devices use tcp_sendpage, which
		 * doesn't behave well when given a 0-count page; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}
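/*
 * bio_map() returns the number of bytes it could not attach; a
 * non-zero result tells the caller another bio is needed.  For
 * example, mapping 1 MiB into a bio limited to 128 vecs (512 KiB
 * with 4 KiB pages) leaves 524288 bytes for the next bio.
 */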
static unsigned int
bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
{
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));

	return (abd_scatter_bio_map_off(bio, abd, size, off));
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	submit_bio(bio);
#else
	submit_bio(0, bio);
#endif
}

#ifndef HAVE_BIO_SET_DEV
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* !HAVE_BIO_SET_DEV */

static inline void
vdev_submit_bio(struct bio *bio)
{
	/*
	 * Save and clear the per-task bio list around submission so the
	 * bio is dispatched immediately rather than being queued on an
	 * in-progress generic_make_request() list.
	 */
#ifdef HAVE_CURRENT_BIO_TAIL
	struct bio **bio_tail = current->bio_tail;
	current->bio_tail = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_tail = bio_tail;
#else
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
#endif
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	struct blk_plug plug;
#endif
	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    io_offset, io_size, i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return (SET_ERROR(ENOMEM));

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);
#endif

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);

#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);
#endif

	(void) vdev_disk_dio_put(dr);

	return (error);
}
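/*
 * Sizing example (assumes 4 KiB pages and BIO_MAX_PAGES == 256): a
 * single bio then spans at most 1 MiB, so a 16 MiB vdev I/O splits
 * into 16 bio's, matching the initial bio_count of 16 above; anything
 * larger takes the double-and-retry path.
 */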
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	bio = bio_alloc(GFP_NOIO, 0);
	/* bio_alloc() with __GFP_WAIT never returns NULL */
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}
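/*
 * Note: a zero-length bio is the conventional way to issue a
 * standalone cache flush; bio_set_flush() tags it with whichever
 * flush op/flag the running kernel expects.
 */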
static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int rw, flags, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state.  Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
		flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
		flags = REQ_UNPLUG;
#else
		flags = 0;
#endif
		break;

	case ZIO_TYPE_READ:
		rw = READ;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
		flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
		flags = REQ_UNPLUG;
#else
		flags = 0;
#endif
		break;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, flags);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed, this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	spa_t *spa = NULL;
	char *p;

	if (val == NULL)
		return (SET_ERROR(-EINVAL));

	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	if (spa_mode_global != 0) {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			if (spa_state(spa) != POOL_STATE_ACTIVE ||
			    !spa_writeable(spa) || spa_suspended(spa))
				continue;

			spa_open_ref(spa, FTAG);
			mutex_exit(&spa_namespace_lock);
			vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
			mutex_enter(&spa_namespace_lock);
			spa_close(spa, FTAG);
		}
		mutex_exit(&spa_namespace_lock);
	}

	return (param_set_charp(val, kp));
}
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,			/* vdev_op_state_change */
	NULL,			/* vdev_op_need_resilver */
	vdev_disk_hold,
	vdev_disk_rele,
	NULL,			/* vdev_op_remap */
	vdev_default_xlate,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");