/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>

typedef struct vdev_disk {
	struct block_device *vd_bdev;
	krwlock_t vd_lock;
} vdev_disk_t;

/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing. The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static unsigned zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)

/*
 * Direct IO request state for a single zio, which may be split
 * across one or more attached bio's.
 */
typedef struct dio_request {
	zio_t *dr_zio;		/* Parent ZIO */
	atomic_t dr_ref;	/* References */
	int dr_error;		/* Bio error */
	int dr_bio_count;	/* Count of bio's */
	struct bio *dr_bio[0];	/* Attached bio's */
} dio_request_t;
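
/*
 * Note: dr_bio[] is a variable-length trailing array. See
 * vdev_disk_dio_alloc() below, which allocates the struct plus
 * bio_count bio pointers in a single kmem_zalloc() so the request
 * and its bio table are freed together.
 */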

static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	return (mode);
}

/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}

/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity. Or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'. The efi_use_whole_disk()
 * function is responsible for verifying the expected partition layout in
 * the wholedisk case, and updating the partition table if appropriate.
 * Once the partition size has been increased the additional capacity will
 * be visible using bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger
 * than, or at the very least equal to, the usable capacity to prevent
 * overestimating the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev->bd_part != NULL && bdev != bdev->bd_contains) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions. Over-reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout keeps less than PARTITION_END_ALIGNMENT bytes after
		 * the "reserved" EFI partition; in such cases return the
		 * device's usable capacity.
		 */
		available = i_size_read(bdev->bd_contains->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
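
/*
 * Illustrative arithmetic for the deduction above: EFI_MIN_RESV_SIZE
 * alone is 16384 512-byte sectors (8 MiB), so with the partition start
 * block and end alignment constants (defined elsewhere) added, roughly
 * 8-10 MiB is subtracted from the whole-disk size before taking the
 * MAX against bdev_capacity(). The exact figure depends on those
 * platform-defined constants.
 */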

static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open. When expanding a
	 * partition, force re-scanning of the partition table while closed
	 * in order to get an accurate updated block device size. Then,
	 * since udev may need to recreate the device links, increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev->bd_contains) {
				bdevname(bdev->bd_contains, disk_name + 5);
				reread_part = B_TRUE;
			}

			blkdev_put(bdev, mode | FMODE_EXCL);
		}

		if (reread_part) {
			bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				blkdev_put(bdev, mode | FMODE_EXCL);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time. This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue. If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved. This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit. It is not advised that you use the
	 * /dev/sdX and /dev/hdX device names, which may be reordered due
	 * to probing order. Devices in the wrong locations will be detected
	 * by the higher level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events. This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point. Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up. In
	 * practice delays have been observed to be on the order of 100ms.
	 */
	hrtime_t start = gethrtime();
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
		bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
		    zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdev = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdev = bdev;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}

	struct request_queue *q = bdev_get_queue(vd->vd_bdev);

	/* Determine the physical block size */
	int block_size = bdev_physical_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = !!blk_queue_discard(q);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = !!blk_queue_discard_secure(q);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(q);

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Derive the ashift from the device's physical block size */
	*ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
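	/*
	 * For example, a 4096-byte physical block size yields an ashift
	 * of 12 (2^12 == 4096), while a 512-byte device is clamped to
	 * SPA_MINBLOCKSIZE (512) and yields the minimum ashift of 9.
	 */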

	return (0);
}

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		blkdev_put(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return (dr);
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interpret is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}
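
/*
 * Reference protocol: __vdev_disk_physio() takes one reference per
 * attached bio (each dropped by vdev_disk_physio_completion) plus one
 * extra reference held across submission, so the dio_request cannot
 * be freed while bio's are still being issued. The zio completes when
 * the final reference is dropped.
 */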

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	(void) vdev_disk_dio_put(dr);
}

static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	submit_bio(bio);
#else
	submit_bio(0, bio);
#endif
}

#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only. Define our
 * own version when needed which uses rcu_read_lock_sched().
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
		rc = atomic_long_inc_not_zero(&ref->count);
	}

	rcu_read_unlock_sched();

	return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol, thus inadvertently converting
 * the entire macro to GPL-only. Provide a minimal version which always
 * assigns the request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
	struct request_queue *q = bio->bi_disk->queue;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_associate_blkg	vdev_bio_associate_blkg
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */

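/*
 * Temporarily clear current->bio_list while submitting. When called
 * from within generic_make_request() a non-NULL bio_list would cause
 * the nested submission to be queued behind the caller's in-flight
 * bio's instead of being dispatched immediately, which can deadlock.
 */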
static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;
	struct blk_plug plug;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > bdev->bd_inode->i_size) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    io_offset, io_size, i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return (SET_ERROR(ENOMEM));

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to split the IO into multiple bio's and wait
	 * for them all to complete. Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
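	/*
	 * For example, with 4 KiB pages a single bio can typically map
	 * up to BIO_MAX_PAGES (historically 256) pages, i.e. 1 MiB; a
	 * larger or highly fragmented ABD is spread across several
	 * bio's by the loop below.
	 */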

	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	(void) vdev_disk_dio_put(dr);

	return (error);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error == EOPNOTSUPP)
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	/* bio_alloc() with __GFP_WAIT never returns NULL */
	bio = bio_alloc(GFP_NOIO, 0);
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}

static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	unsigned long trim_flags = 0;
	int rw, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state. Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
		break;

	case ZIO_TYPE_READ:
		rw = READ;
		break;

	case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
		zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
		    trim_flags);

		rw_exit(&vd->vd_lock);
		zio_interrupt(zio);
		return;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, 0);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media. If it is
	 * determined that the media has changed, this triggers the
	 * asynchronous removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			invalidate_bdev(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};

/*
 * The zfs_vdev_scheduler module option has been deprecated. Setting this
 * value no longer has any effect. It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file. The following warning will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
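
/*
 * Usage sketch (standard modprobe option syntax assumed): a stale line
 * such as
 *
 *	options zfs zfs_vdev_scheduler=none
 *
 * in /etc/modprobe.d/zfs.conf will still allow the module to load; the
 * handler above merely logs that the option is unsupported.
 */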