/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;

/*
 * Virtual device vector for disks.
 */
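/*
 * A dio_request tracks all of the bios issued on behalf of a single zio.
 * One reference is held for each attached bio plus one for the submitting
 * context; the zio is completed when the final reference is dropped in
 * vdev_disk_dio_put().
 */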
typedef struct dio_request {
        struct completion dr_comp;      /* Completion for sync IO */
        atomic_t dr_ref;                /* References */
        zio_t *dr_zio;                  /* Parent ZIO */
        int dr_rw;                      /* Read/Write */
        int dr_error;                   /* Bio error */
        int dr_bio_count;               /* Count of bio's */
        struct bio *dr_bio[0];          /* Attached bio's */
} dio_request_t;

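/*
 * Translate the Solaris FREAD/FWRITE open flags used by the common ZFS
 * code into the mode expected by the Linux block device interfaces.
 * Which form is required depends on the open_bdev_*() API provided by
 * the running kernel.
 */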
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
        fmode_t mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if (smode & FREAD)
                mode |= FMODE_READ;

        if (smode & FWRITE)
                mode |= FMODE_WRITE;

        return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
        int mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if ((smode & FREAD) && !(smode & FWRITE))
                mode = MS_RDONLY;

        return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

static uint64_t
bdev_capacity(struct block_device *bdev)
{
        struct hd_struct *part = bdev->bd_part;

        /* The partition capacity referenced by the block device */
        if (part)
                return part->nr_sects;

        /* Otherwise assume the full device capacity */
        return get_capacity(bdev->bd_disk);
}

static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
        printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
            "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
            (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
            zio->io_flags, (u_longlong_t)zio->io_delay);
#endif
}

/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while still allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 *
 * Unfortunately we cannot directly call the elevator_switch() function
 * because it is not exported from the block layer.  This means we have
 * to use the sysfs interface and a user space upcall.  Pools will be
 * automatically imported on module load so we must do this at device
 * open time from the kernel.
 */
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
        vdev_disk_t *vd = v->vdev_tsd;
        struct block_device *bdev = vd->vd_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        char *device = bdev->bd_disk->disk_name;
        char sh_path[] = "/bin/sh";
        char sh_cmd[128];
        char *argv[] = { sh_path, "-c", sh_cmd };
        char *envp[] = { NULL };
        int count = 0, error;

        /* Skip devices which are not whole disks (partitions) */
        if (!v->vdev_wholedisk)
                return (0);

        /* Skip devices without schedulers (loop, ram, dm, etc) */
        if (!q->elevator || !blk_queue_stackable(q))
                return (0);

        /* Leave existing scheduler when set to "none" */
        if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
                return (0);

        /*
         * Set the desired scheduler with a three attempt retry for
         * -EFAULT which has been observed to occur spuriously.
         */
        sprintf(sh_cmd, "%s \"%s\" >/sys/block/%s/queue/scheduler",
            "/bin/echo", elevator, device);

        while (++count <= 3) {
                error = call_usermodehelper(sh_path, argv, envp, 1);
                if ((error == 0) || (error != -EFAULT))
                        break;
        }

        if (error)
                printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
                    elevator, v->vdev_path, device, error);

        return (error);
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
        struct block_device *bdev;
        vdev_disk_t *vd;
        int mode, block_size;

        /* Must have a pathname and it must be absolute. */
        if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
                v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return EINVAL;
        }

        vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
        if (vd == NULL)
                return ENOMEM;

        /*
         * Devices are always opened by the path provided at configuration
         * time.  This means that if the provided path is a udev by-id path
         * then drives may be recabled without an issue.  If the provided
         * path is a udev by-path path then the physical location information
         * will be preserved.  This can be critical for more complicated
         * configurations where drives are located in specific physical
         * locations to maximize the system's tolerance to component failure.
         * Alternatively, you can provide your own udev rule to flexibly map
         * the drives as you see fit.  It is not advised that you use the
         * /dev/[hd]d devices which may be reordered due to probing order.
         * Devices in the wrong locations will be detected by the higher
         * level vdev validation.
         */
        mode = spa_mode(v->vdev_spa);
        bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
        if (IS_ERR(bdev)) {
                kmem_free(vd, sizeof(vdev_disk_t));
                return -PTR_ERR(bdev);
        }

        v->vdev_tsd = vd;
        vd->vd_bdev = bdev;
        block_size = vdev_bdev_block_size(bdev);

        /*
         * We think the wholedisk property should always be set when this
         * function is called.  ASSERT here so if any legitimate cases exist
         * where it's not set, we'll find them during debugging.  If we never
         * hit the ASSERT, this and the following conditional statement can
         * be removed.
         */
        ASSERT3S(v->vdev_wholedisk, !=, -1ULL);

        /*
         * The wholedisk property was initialized to -1 in vdev_alloc() if it
         * was unspecified.  In that case, check if this is a whole device.
         * When bdev->bd_contains == bdev we have a whole device and not
         * simply a partition.
         */
        if (v->vdev_wholedisk == -1ULL)
                v->vdev_wholedisk = (bdev->bd_contains == bdev);

        /* Clear the nowritecache bit, which causes vdev_reopen() to try again. */
        v->vdev_nowritecache = B_FALSE;

        /* Physical volume size in bytes */
        *psize = bdev_capacity(bdev) * block_size;

        /* Based on the minimum sector size set the block size */
        *ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

        /* Try to set the io scheduler elevator algorithm */
        (void) vdev_elevator_switch(v, zfs_vdev_scheduler);

        return 0;
}

static void
vdev_disk_close(vdev_t *v)
{
        vdev_disk_t *vd = v->vdev_tsd;

        if (vd == NULL)
                return;

        if (vd->vd_bdev != NULL)
                vdev_bdev_close(vd->vd_bdev,
                    vdev_bdev_mode(spa_mode(v->vdev_spa)));

        kmem_free(vd, sizeof(vdev_disk_t));
        v->vdev_tsd = NULL;
}

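/*
 * Allocate a dio_request large enough to hold 'bio_count' bio pointers
 * in its trailing flexible array and initialize it for use.
 */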
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
        dio_request_t *dr;
        int i;

        dr = kmem_zalloc(sizeof(dio_request_t) +
            sizeof(struct bio *) * bio_count, KM_SLEEP);
        if (dr) {
                init_completion(&dr->dr_comp);
                atomic_set(&dr->dr_ref, 0);
                dr->dr_bio_count = bio_count;
                dr->dr_error = 0;

                for (i = 0; i < dr->dr_bio_count; i++)
                        dr->dr_bio[i] = NULL;
        }

        return dr;
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
        int i;

        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        bio_put(dr->dr_bio[i]);

        kmem_free(dr, sizeof(dio_request_t) +
            sizeof(struct bio *) * dr->dr_bio_count);
}

static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
        /* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
        return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
        /* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
        return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
        /* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
        return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}

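/*
 * dio_request reference counting: one reference is taken for each attached
 * bio and one by the submitting context.  Dropping the final reference
 * frees the dio_request and completes the parent zio.
 */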
static void
vdev_disk_dio_get(dio_request_t *dr)
{
        atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
        int rc = atomic_dec_return(&dr->dr_ref);

        /*
         * Free the dio_request when the last reference is dropped and
         * ensure zio_interrupt() is called only once with the correct zio.
         */
        if (rc == 0) {
                zio_t *zio = dr->dr_zio;
                int error = dr->dr_error;

                vdev_disk_dio_free(dr);

                if (zio) {
                        zio->io_delay = jiffies_to_msecs(
                            jiffies_64 - zio->io_delay);
                        zio->io_error = error;
                        ASSERT3S(zio->io_error, >=, 0);
                        if (zio->io_error)
                                vdev_disk_error(zio);
                        zio_interrupt(zio);
                }
        }

        return rc;
}

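/*
 * Per-bio completion callback.  Record the first error observed, drop this
 * bio's dio_request reference, and wake any synchronous waiter once the
 * last outstanding bio has completed.
 */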
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
        dio_request_t *dr = bio->bi_private;
        int rc;

        /* Fatal error but print some useful debugging before asserting */
        if (dr == NULL)
                PANIC("dr == NULL, bio->bi_private == NULL\n"
                    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
                    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
                    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
                    bio->bi_idx, bio->bi_size, bio->bi_end_io,
                    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
        if (bio->bi_size)
                return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

        if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (dr->dr_error == 0)
                dr->dr_error = -error;

        /* Drop the reference acquired by __vdev_disk_physio() */
        rc = vdev_disk_dio_put(dr);

        /* Wake up the synchronous waiter if this is the last outstanding bio */
        if ((rc == 1) && vdev_disk_dio_is_sync(dr))
                complete(&dr->dr_comp);

        BIO_END_IO_RETURN(0);
}

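/*
 * Return the number of pages spanned by the kernel buffer, accounting for
 * a buffer which does not begin on a page boundary.
 */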
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
        return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
            PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}

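/*
 * Add as much of the kernel buffer as possible to the given bio as
 * page-sized segments.  Both vmalloc'd and kmalloc'd buffers are handled.
 * Returns the number of bytes which could not be mapped; a non-zero result
 * means the caller must construct an additional bio.
 */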
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
        unsigned int offset, size, i;
        struct page *page;

        offset = offset_in_page(bio_ptr);
        for (i = 0; i < bio->bi_max_vecs; i++) {
                size = PAGE_SIZE - offset;

                if (bio_size <= 0)
                        break;

                if (size > bio_size)
                        size = bio_size;

                if (kmem_virt(bio_ptr))
                        page = vmalloc_to_page(bio_ptr);
                else
                        page = virt_to_page(bio_ptr);

                if (bio_add_page(bio, page, size, offset) != size)
                        break;

                bio_ptr += size;
                bio_size -= size;
                offset = 0;
        }

        return bio_size;
}

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
        dio_request_t *dr;
        caddr_t bio_ptr;
        uint64_t bio_offset;
        int bio_size, bio_count = 16;
        int i = 0, error = 0, block_size;

        ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
        dr = vdev_disk_dio_alloc(bio_count);
        if (dr == NULL)
                return ENOMEM;

        if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
                bio_set_flags_failfast(bdev, &flags);

        dr->dr_zio = zio;
        dr->dr_rw = flags;
        block_size = vdev_bdev_block_size(bdev);

        /*
         * When the IO size exceeds the maximum bio size for the request
         * queue we are forced to break the IO into multiple bio's and wait
         * for them all to complete.  Ideally, all pool users will set
         * their volume block size to match the maximum request size and
         * the common case will be one bio per vdev IO request.
         */
        bio_ptr    = kbuf_ptr;
        bio_offset = kbuf_offset;
        bio_size   = kbuf_size;
        for (i = 0; i <= dr->dr_bio_count; i++) {

                /* Finished constructing bio's for given buffer */
                if (bio_size <= 0)
                        break;

                /*
                 * By default only 'bio_count' bio's per dio are allowed.
                 * However, if we find ourselves in a situation where more
                 * are needed we allocate a larger dio and warn the user.
                 */
                if (dr->dr_bio_count == i) {
                        vdev_disk_dio_free(dr);
                        bio_count *= 2;
                        printk("WARNING: Resized bio's/dio to %d\n", bio_count);
                        goto retry;
                }

                dr->dr_bio[i] = bio_alloc(GFP_NOIO,
                    bio_nr_pages(bio_ptr, bio_size));
                if (dr->dr_bio[i] == NULL) {
                        vdev_disk_dio_free(dr);
                        return ENOMEM;
                }

                /* Matching put called by vdev_disk_physio_completion */
                vdev_disk_dio_get(dr);

                dr->dr_bio[i]->bi_bdev = bdev;
                dr->dr_bio[i]->bi_sector = bio_offset / block_size;
                dr->dr_bio[i]->bi_rw = dr->dr_rw;
                dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
                dr->dr_bio[i]->bi_private = dr;

                /* Remaining size is returned to become the new size */
                bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

                /* Advance in buffer and construct another bio if needed */
                bio_ptr    += dr->dr_bio[i]->bi_size;
                bio_offset += dr->dr_bio[i]->bi_size;
        }

        /* Extra reference to protect dio_request during submit_bio */
        vdev_disk_dio_get(dr);
        if (zio)
                zio->io_delay = jiffies_64;

        /* Submit all bio's associated with this dio */
        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        submit_bio(dr->dr_rw, dr->dr_bio[i]);

        /*
         * On synchronous blocking requests we wait for all of the bio
         * completion callbacks to run.  We will be woken when the last
         * callback runs for this dio.  We are responsible for putting the
         * last dio_request reference, which in turn puts back the last bio
         * references.  The only synchronous consumer is
         * vdev_disk_read_rootlabel(); all other IO originating from
         * vdev_disk_io_start() is asynchronous.
         */
        if (vdev_disk_dio_is_sync(dr)) {
                wait_for_completion(&dr->dr_comp);
                error = dr->dr_error;
                ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
        }

        (void)vdev_disk_dio_put(dr);

        return error;
}

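/*
 * Synchronous wrapper around __vdev_disk_physio() used when no zio is
 * available, for example when reading the root pool label below.
 */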
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
        bio_set_flags_failfast(bdev, &flags);
        return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}

/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
        zio_t *zio = bio->bi_private;

        zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);
        zio->io_error = -rc;
        if (rc && (rc == -EOPNOTSUPP))
                zio->io_vd->vdev_nowritecache = B_TRUE;

        bio_put(bio);
        ASSERT3S(zio->io_error, >=, 0);
        if (zio->io_error)
                vdev_disk_error(zio);
        zio_interrupt(zio);

        BIO_END_IO_RETURN(0);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        struct request_queue *q;
        struct bio *bio;

        q = bdev_get_queue(bdev);
        if (!q)
                return ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return ENOMEM;

        bio->bi_end_io = vdev_disk_io_flush_completion;
        bio->bi_private = zio;
        bio->bi_bdev = bdev;
        zio->io_delay = jiffies_64;
        submit_bio(WRITE_BARRIER, bio);

        return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */

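/*
 * Dispatch a zio to the block device.  Cache flush requests are handled by
 * vdev_disk_io_flush() while reads and writes are mapped onto bios and
 * submitted by __vdev_disk_physio().
 */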
static int
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *v = zio->io_vd;
        vdev_disk_t *vd = v->vdev_tsd;
        int flags, error;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:

                if (!vdev_readable(v)) {
                        zio->io_error = ENXIO;
                        return ZIO_PIPELINE_CONTINUE;
                }

                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (v->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }

                        error = vdev_disk_io_flush(vd->vd_bdev, zio);
                        if (error == 0)
                                return ZIO_PIPELINE_STOP;

                        zio->io_error = error;
                        if (error == ENOTSUP)
                                v->vdev_nowritecache = B_TRUE;

                        break;

                default:
                        zio->io_error = ENOTSUP;
                }

                return ZIO_PIPELINE_CONTINUE;

        case ZIO_TYPE_WRITE:
                flags = WRITE;
                break;

        case ZIO_TYPE_READ:
                flags = READ;
                break;

        default:
                zio->io_error = ENOTSUP;
                return ZIO_PIPELINE_CONTINUE;
        }

        error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
            zio->io_size, zio->io_offset, flags);
        if (error) {
                zio->io_error = error;
                return ZIO_PIPELINE_CONTINUE;
        }

        return ZIO_PIPELINE_STOP;
}

static void
vdev_disk_io_done(zio_t *zio)
{
        /*
         * If the device returned EIO, we revalidate the media.  If it is
         * determined the media has changed, this triggers the asynchronous
         * removal of the device from the configuration.
         */
        if (zio->io_error == EIO) {
                vdev_t *v = zio->io_vd;
                vdev_disk_t *vd = v->vdev_tsd;

                if (check_disk_change(vd->vd_bdev)) {
                        vdev_bdev_invalidate(vd->vd_bdev);
                        v->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                }
        }
}

static void
vdev_disk_hold(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* We must have a pathname, and it must be absolute. */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
                return;

        /*
         * Only prefetch path and devid info if the device has
         * never been opened.
         */
        if (vd->vdev_tsd != NULL)
                return;

        /* XXX: Implement me as a vnode lookup for the device */
        vd->vdev_name_vp = NULL;
        vd->vdev_devid_vp = NULL;
}

static void
vdev_disk_rele(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* XXX: Implement me as a vnode rele for the device */
}

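/*
 * Operations table wiring the disk-specific callbacks above into the
 * generic vdev layer.
 */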
vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,
        vdev_disk_hold,
        vdev_disk_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
        struct block_device *bdev;
        vdev_label_t *label;
        uint64_t s, size;
        int i;

        bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
        if (IS_ERR(bdev))
                return -PTR_ERR(bdev);

        s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
        if (s == 0) {
                vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
                return EIO;
        }

        size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
        label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);

        for (i = 0; i < VDEV_LABELS; i++) {
                uint64_t offset, state, txg = 0;

                /* read vdev label */
                offset = vdev_label_offset(size, i, 0);
                if (vdev_disk_physio(bdev, (caddr_t)label,
                    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state >= POOL_STATE_DESTROYED) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        vmem_free(label, sizeof(vdev_label_t));
        vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

        return 0;
}

module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "IO Scheduler (noop)");