/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2019 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <linux/blkpg.h>
#include <linux/msdos_fs.h>
#include <linux/vfs_compat.h>
#ifdef HAVE_LINUX_BLK_CGROUP_HEADER
#include <linux/blk-cgroup.h>
#endif

typedef struct vdev_disk {
	struct block_device *vd_bdev;
	krwlock_t vd_lock;
} vdev_disk_t;

/*
 * Unique identifier for the exclusive vdev holder.
 */
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Wait up to zfs_vdev_open_timeout_ms milliseconds before determining the
 * device is missing. The missing path may be transient since the links
 * can be briefly removed and recreated in response to udev events.
 */
static unsigned zfs_vdev_open_timeout_ms = 1000;

/*
 * Size of the "reserved" partition, in blocks.
 */
#define	EFI_MIN_RESV_SIZE	(16 * 1024)
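/*
 * With 512-byte sectors this works out to 16384 blocks, i.e. the 8 MiB
 * reserved EFI partition placed at the end of a wholedisk vdev.
 */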

/*
 * Per-I/O request state. A dio_request tracks the parent zio and the
 * set of bio's issued on its behalf; it is reference counted so the
 * zio is completed exactly once, after the last attached bio finishes.
 */
typedef struct dio_request {
	zio_t		*dr_zio;	/* Parent ZIO */
	atomic_t	dr_ref;		/* References */
	int		dr_error;	/* Bio error */
	int		dr_bio_count;	/* Count of bio's */
	struct bio	*dr_bio[];	/* Attached bio's */
} dio_request_t;

static fmode_t
vdev_bdev_mode(spa_mode_t spa_mode)
{
	fmode_t mode = 0;

	if (spa_mode & SPA_MODE_READ)
		mode |= FMODE_READ;

	if (spa_mode & SPA_MODE_WRITE)
		mode |= FMODE_WRITE;

	return (mode);
}
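
/*
 * For example, a read-write import (SPA_MODE_READ | SPA_MODE_WRITE)
 * maps to FMODE_READ | FMODE_WRITE, while a read-only import maps to
 * FMODE_READ alone.
 */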

/*
 * Returns the usable capacity (in bytes) for the partition or disk.
 */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	return (i_size_read(bdev->bd_inode));
}

#if !defined(HAVE_BDEV_WHOLE)
static inline struct block_device *
bdev_whole(struct block_device *bdev)
{
	return (bdev->bd_contains);
}
#endif

/*
 * Returns the maximum expansion capacity of the block device (in bytes).
 *
 * It is possible to expand a vdev when it has been created as a wholedisk
 * and the containing block device has increased in capacity, or when the
 * partition containing the pool has been manually increased in size.
 *
 * This function is only responsible for calculating the potential expansion
 * size so it can be reported by 'zpool list'. The efi_use_whole_disk()
 * function is responsible for verifying the expected partition layout in
 * the wholedisk case, and updating the partition table if appropriate.
 * Once the partition size has been increased the additional capacity will
 * be visible using bdev_capacity().
 *
 * The returned maximum expansion capacity is always expected to be larger
 * than, or at the very least equal to, the usable capacity to prevent
 * overestimating the pool expandsize.
 */
static uint64_t
bdev_max_capacity(struct block_device *bdev, uint64_t wholedisk)
{
	uint64_t psize;
	int64_t available;

	if (wholedisk && bdev != bdev_whole(bdev)) {
		/*
		 * When reporting maximum expansion capacity for a wholedisk
		 * deduct any capacity which is expected to be lost due to
		 * alignment restrictions. Over-reporting this value isn't
		 * harmful and would only result in slightly less capacity
		 * than expected post expansion.
		 * The estimated available space may be slightly smaller than
		 * bdev_capacity() for devices where the number of sectors is
		 * not a multiple of the alignment size and the partition
		 * layout is keeping less than PARTITION_END_ALIGNMENT bytes
		 * after the "reserved" EFI partition: in such cases return
		 * the device usable capacity.
		 */
		available = i_size_read(bdev_whole(bdev)->bd_inode) -
		    ((EFI_MIN_RESV_SIZE + NEW_START_BLOCK +
		    PARTITION_END_ALIGNMENT) << SECTOR_BITS);
		psize = MAX(available, bdev_capacity(bdev));
	} else {
		psize = bdev_capacity(bdev);
	}

	return (psize);
}
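
/*
 * Worked example (numbers illustrative, not from a real device): with
 * 512-byte sectors the deduction above is (EFI_MIN_RESV_SIZE +
 * NEW_START_BLOCK + PARTITION_END_ALIGNMENT) sectors, a little over
 * 8 MiB, so a 10 GiB wholedisk reports roughly 10 GiB minus that
 * reserve. The MAX() guarantees we never report less than the current
 * usable capacity of the partition itself.
 */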

static void
vdev_disk_error(zio_t *zio)
{
	/*
	 * This function can be called in interrupt context, for instance while
	 * handling IRQs coming from a misbehaving disk device; use printk()
	 * which is safe from any context.
	 */
	printk(KERN_WARNING "zio pool=%s vdev=%s error=%d type=%d "
	    "offset=%llu size=%llu flags=%x\n", spa_name(zio->io_spa),
	    zio->io_vd->vdev_path, zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}
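
/*
 * A resulting log line might look like this (all values illustrative):
 *
 *   zio pool=tank vdev=/dev/sda1 error=5 type=2 offset=270336
 *       size=8192 flags=180880
 */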

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *logical_ashift, uint64_t *physical_ashift)
{
	struct block_device *bdev;
	fmode_t mode = vdev_bdev_mode(spa_mode(v->vdev_spa));
	hrtime_t timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms);
	vdev_disk_t *vd;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		vdev_dbgmsg(v, "invalid vdev_path");
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it is currently open. When expanding a
	 * partition, force re-scanning the partition table if userland
	 * did not take care of this already. We need to do this while closed
	 * in order to get an accurate updated block device size. Then,
	 * since udev may need to recreate the device links, increase the
	 * open retry timeout before reporting the device as unavailable.
	 */
	vd = v->vdev_tsd;
	if (vd) {
		char disk_name[BDEVNAME_SIZE + 6] = "/dev/";
		boolean_t reread_part = B_FALSE;

		rw_enter(&vd->vd_lock, RW_WRITER);
		bdev = vd->vd_bdev;
		vd->vd_bdev = NULL;

		if (bdev) {
			if (v->vdev_expanding && bdev != bdev_whole(bdev)) {
				bdevname(bdev_whole(bdev), disk_name + 5);
				/*
				 * If userland has BLKPG_RESIZE_PARTITION,
				 * then it should have updated the partition
				 * table already. We can detect this by
				 * comparing our current physical size
				 * with that of the device. If they are
				 * the same, then we must not have
				 * BLKPG_RESIZE_PARTITION or it failed to
				 * update the partition table online. We
				 * fall back to rescanning the partition
				 * table from the kernel below. However,
				 * if the capacity already reflects the
				 * updated partition, then we skip
				 * rescanning the partition table here.
				 */
				if (v->vdev_psize == bdev_capacity(bdev))
					reread_part = B_TRUE;
			}

			blkdev_put(bdev, mode | FMODE_EXCL);
		}

		if (reread_part) {
			bdev = blkdev_get_by_path(disk_name, mode | FMODE_EXCL,
			    zfs_vdev_holder);
			if (!IS_ERR(bdev)) {
				int error = vdev_bdev_reread_part(bdev);
				blkdev_put(bdev, mode | FMODE_EXCL);
				if (error == 0) {
					timeout = MSEC2NSEC(
					    zfs_vdev_open_timeout_ms * 2);
				}
			}
		}
	} else {
		vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);

		rw_init(&vd->vd_lock, NULL, RW_DEFAULT, NULL);
		rw_enter(&vd->vd_lock, RW_WRITER);
	}

	/*
	 * Devices are always opened by the path provided at configuration
	 * time. This means that if the provided path is a udev by-id path
	 * then drives may be re-cabled without an issue. If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved. This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 *
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit. It is not advised that you use the
	 * /dev/[sh]dX devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events. This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point. Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up. In
	 * practice delays have been observed to be on the order of 100ms.
	 *
	 * When ERESTARTSYS is returned it indicates the block device is
	 * a zvol which could not be opened due to the deadlock detection
	 * logic in zvol_open(). Extend the timeout and retry the open;
	 * subsequent attempts are expected to eventually succeed.
	 */
	hrtime_t start = gethrtime();
	bdev = ERR_PTR(-ENXIO);
	while (IS_ERR(bdev) && ((gethrtime() - start) < timeout)) {
		bdev = blkdev_get_by_path(v->vdev_path, mode | FMODE_EXCL,
		    zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			schedule_timeout(MSEC_TO_TICK(10));
		} else if (unlikely(PTR_ERR(bdev) == -ERESTARTSYS)) {
			timeout = MSEC2NSEC(zfs_vdev_open_timeout_ms * 10);
			continue;
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		int error = -PTR_ERR(bdev);
		vdev_dbgmsg(v, "open error=%d timeout=%llu/%llu", error,
		    (u_longlong_t)(gethrtime() - start),
		    (u_longlong_t)timeout);
		vd->vd_bdev = NULL;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
		return (SET_ERROR(error));
	} else {
		vd->vd_bdev = bdev;
		v->vdev_tsd = vd;
		rw_exit(&vd->vd_lock);
	}

	struct request_queue *q = bdev_get_queue(vd->vd_bdev);

	/* Determine the physical block size */
	int physical_block_size = bdev_physical_block_size(vd->vd_bdev);

	/* Determine the logical block size */
	int logical_block_size = bdev_logical_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Set when device reports it supports TRIM. */
	v->vdev_has_trim = !!blk_queue_discard(q);

	/* Set when device reports it supports secure TRIM. */
	v->vdev_has_securetrim = !!blk_queue_discard_secure(q);

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(q);

	/* Physical volume size in bytes for the partition */
	*psize = bdev_capacity(vd->vd_bdev);

	/* Physical volume size in bytes including possible expansion space */
	*max_psize = bdev_max_capacity(vd->vd_bdev, v->vdev_wholedisk);

	/* Based on the minimum sector size, set the block size */
	*physical_ashift = highbit64(MAX(physical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;

	*logical_ashift = highbit64(MAX(logical_block_size,
	    SPA_MINBLOCKSIZE)) - 1;
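
	/*
	 * For example, a device advertising 4096-byte physical and
	 * 512-byte logical blocks yields *physical_ashift =
	 * highbit64(4096) - 1 = 12 and *logical_ashift =
	 * highbit64(512) - 1 = 9.
	 */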

	return (0);
}

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL) {
		blkdev_put(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)) | FMODE_EXCL);
	}

	rw_destroy(&vd->vd_lock);
	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	atomic_set(&dr->dr_ref, 0);
	dr->dr_bio_count = bio_count;
	dr->dr_error = 0;

	for (int i = 0; i < dr->dr_bio_count; i++)
		dr->dr_bio[i] = NULL;

	return (dr);
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_delay_interrupt() is called only once with the
	 * correct zio.
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}
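
/*
 * Reference counting sketch: vdev_disk_dio_alloc() starts dr_ref at
 * zero, __vdev_disk_physio() takes one reference per attached bio plus
 * an extra one for itself while submitting, and each bio completion
 * drops one. Whichever context drops the final reference (possibly the
 * submitter itself, for a fast device) frees the dio_request and fires
 * zio_delay_interrupt() exactly once.
 */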

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	(void) vdev_disk_dio_put(dr);
}

static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	(void) submit_bio(bio);
#else
	(void) submit_bio(0, bio);
#endif
}

/*
 * preempt_schedule_notrace is GPL-only which breaks the ZFS build, so
 * replace it with preempt_schedule under the following condition:
 */
#if defined(CONFIG_ARM64) && \
	defined(CONFIG_PREEMPTION) && \
	defined(CONFIG_BLK_CGROUP)
#define	preempt_schedule_notrace(x) preempt_schedule(x)
#endif

#ifdef HAVE_BIO_SET_DEV
#if defined(CONFIG_BLK_CGROUP) && defined(HAVE_BIO_SET_DEV_GPL_ONLY)
/*
 * The Linux 5.5 kernel updated percpu_ref_tryget() which is inlined by
 * blkg_tryget() to use rcu_read_lock() instead of rcu_read_lock_sched().
 * As a side effect the function was converted to GPL-only. Define our
 * own version when needed which uses rcu_read_lock_sched().
 */
#if defined(HAVE_BLKG_TRYGET_GPL_ONLY)
static inline bool
vdev_blkg_tryget(struct blkcg_gq *blkg)
{
	struct percpu_ref *ref = &blkg->refcnt;
	unsigned long __percpu *count;
	bool rc;

	rcu_read_lock_sched();

	if (__ref_is_percpu(ref, &count)) {
		this_cpu_inc(*count);
		rc = true;
	} else {
#ifdef ZFS_PERCPU_REF_COUNT_IN_DATA
		rc = atomic_long_inc_not_zero(&ref->data->count);
#else
		rc = atomic_long_inc_not_zero(&ref->count);
#endif
	}

	rcu_read_unlock_sched();

	return (rc);
}
#elif defined(HAVE_BLKG_TRYGET)
#define	vdev_blkg_tryget(bg)	blkg_tryget(bg)
#endif
#ifdef HAVE_BIO_SET_DEV_MACRO
/*
 * The Linux 5.0 kernel updated the bio_set_dev() macro so it calls the
 * GPL-only bio_associate_blkg() symbol thus inadvertently converting
 * the entire macro. Provide a minimal version which always assigns the
 * request queue's root_blkg to the bio.
 */
static inline void
vdev_bio_associate_blkg(struct bio *bio)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bio->bi_bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}

#define	bio_associate_blkg	vdev_bio_associate_blkg
#else
static inline void
vdev_bio_set_dev(struct bio *bio, struct block_device *bdev)
{
#if defined(HAVE_BIO_BDEV_DISK)
	struct request_queue *q = bdev->bd_disk->queue;
#else
	struct request_queue *q = bio->bi_disk->queue;
#endif
	bio_clear_flag(bio, BIO_REMAPPED);
	if (bio->bi_bdev != bdev)
		bio_clear_flag(bio, BIO_THROTTLED);
	bio->bi_bdev = bdev;

	ASSERT3P(q, !=, NULL);
	ASSERT3P(bio->bi_blkg, ==, NULL);

	if (q->root_blkg && vdev_blkg_tryget(q->root_blkg))
		bio->bi_blkg = q->root_blkg;
}
#define	bio_set_dev		vdev_bio_set_dev
#endif
#endif
#else
/*
 * Provide a bio_set_dev() helper macro for pre-Linux 4.14 kernels.
 */
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* HAVE_BIO_SET_DEV */

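/*
 * Submit a bio with the calling thread's per-task bio_list temporarily
 * detached, so the block layer treats this as a fresh, top-level
 * submission: a non-NULL current->bio_list signals a recursive
 * submission and causes the bio to be queued for later rather than
 * issued immediately.
 */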
static inline void
vdev_submit_bio(struct bio *bio)
{
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
}

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size;
	int bio_count = 16;
	int error = 0;
	struct blk_plug plug;

	/*
	 * Accessing outside the block device is never allowed.
	 */
	if (io_offset + io_size > i_size_read(bdev->bd_inode)) {
		vdev_dbgmsg(zio->io_vd,
		    "Illegal access %llu size %llu, device size %llu",
		    (u_longlong_t)io_offset,
		    (u_longlong_t)io_size,
		    (u_longlong_t)i_size_read(bdev->bd_inode));
		return (SET_ERROR(EIO));
	}

retry:
	dr = vdev_disk_dio_alloc(bio_count);

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * Since bio's can have up to BIO_MAX_PAGES=256 iovec's, each of which
	 * is at least 512 bytes and at most PAGESIZE (typically 4K), one bio
	 * can cover at least 128KB and at most 1MB. When the required number
	 * of iovec's exceeds this, we are forced to break the IO in multiple
	 * bio's and wait for them all to complete. This is likely if the
	 * recordsize property is increased beyond 1MB. The default
	 * bio_count=16 should typically accommodate the maximum-size zio of
	 * 16MB.
	 */

	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (int i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * If additional bio's are required, we have to retry, but
		 * this should be rare - see the comment above.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
#ifdef HAVE_BIO_MAX_SEGS
		dr->dr_bio[i] = bio_alloc(GFP_NOIO, bio_max_segs(
		    abd_nr_pages_off(zio->io_abd, bio_size, abd_offset)));
#else
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
#endif
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = abd_bio_map_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);

	/* Submit all bio's associated with this dio */
	for (int i = 0; i < dr->dr_bio_count; i++) {
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);
	}

	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);

	(void) vdev_disk_dio_put(dr);

	return (error);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error == EOPNOTSUPP)
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return (SET_ERROR(ENXIO));

	/* bio_alloc() with __GFP_WAIT never returns NULL */
	bio = bio_alloc(GFP_NOIO, 0);
	if (unlikely(bio == NULL))
		return (SET_ERROR(ENOMEM));

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio_set_dev(bio, bdev);
	bio_set_flush(bio);
	vdev_submit_bio(bio);
	invalidate_bdev(bdev);

	return (0);
}
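
/*
 * Note the flush bio above carries no data: an empty bio tagged by the
 * bio_set_flush() compat wrapper (REQ_PREFLUSH on recent kernels) asks
 * the device to write out its volatile cache. Completion, including
 * EOPNOTSUPP from devices without a write cache, is handled
 * asynchronously in vdev_disk_io_flush_completion() above.
 */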

static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	unsigned long trim_flags = 0;
	int rw, error;

	/*
	 * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
	 * Nothing to be done here but return failure.
	 */
	if (vd == NULL) {
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	rw_enter(&vd->vd_lock, RW_READER);

	/*
	 * If the vdev is closed, it's likely due to a failed reopen and is
	 * in the UNAVAIL state. Nothing to be done here but return failure.
	 */
	if (vd->vd_bdev == NULL) {
		rw_exit(&vd->vd_lock);
		zio->io_error = ENXIO;
		zio_interrupt(zio);
		return;
	}

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			rw_exit(&vd->vd_lock);
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0) {
				rw_exit(&vd->vd_lock);
				return;
			}

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		rw_exit(&vd->vd_lock);
		zio_execute(zio);
		return;

	case ZIO_TYPE_WRITE:
		rw = WRITE;
		break;

	case ZIO_TYPE_READ:
		rw = READ;
		break;

	case ZIO_TYPE_TRIM:
#if defined(BLKDEV_DISCARD_SECURE)
		if (zio->io_trim_flags & ZIO_TRIM_SECURE)
			trim_flags |= BLKDEV_DISCARD_SECURE;
#endif
		zio->io_error = -blkdev_issue_discard(vd->vd_bdev,
		    zio->io_offset >> 9, zio->io_size >> 9, GFP_NOFS,
		    trim_flags);

		rw_exit(&vd->vd_lock);
		zio_interrupt(zio);
		return;

	default:
		rw_exit(&vd->vd_lock);
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, 0);
	rw_exit(&vd->vd_lock);

	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media. If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (zfs_check_media_change(vd->vd_bdev)) {
			invalidate_bdev(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
	.vdev_op_init = NULL,
	.vdev_op_fini = NULL,
	.vdev_op_open = vdev_disk_open,
	.vdev_op_close = vdev_disk_close,
	.vdev_op_asize = vdev_default_asize,
	.vdev_op_min_asize = vdev_default_min_asize,
	.vdev_op_min_alloc = NULL,
	.vdev_op_io_start = vdev_disk_io_start,
	.vdev_op_io_done = vdev_disk_io_done,
	.vdev_op_state_change = NULL,
	.vdev_op_need_resilver = NULL,
	.vdev_op_hold = vdev_disk_hold,
	.vdev_op_rele = vdev_disk_rele,
	.vdev_op_remap = NULL,
	.vdev_op_xlate = vdev_default_xlate,
	.vdev_op_rebuild_asize = NULL,
	.vdev_op_metaslab_init = NULL,
	.vdev_op_config_generate = NULL,
	.vdev_op_nparity = NULL,
	.vdev_op_ndisks = NULL,
	.vdev_op_type = VDEV_TYPE_DISK,		/* name of this vdev type */
	.vdev_op_leaf = B_TRUE			/* leaf vdev */
};

/*
 * The zfs_vdev_scheduler module option has been deprecated. Setting this
 * value no longer has any effect. It has not yet been entirely removed
 * to allow the module to be loaded if this option is specified in the
 * /etc/modprobe.d/zfs.conf file. The following message will be logged.
 */
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	int error = param_set_charp(val, kp);
	if (error == 0) {
		printk(KERN_INFO "The 'zfs_vdev_scheduler' module option "
		    "is not supported.\n");
	}

	return (error);
}

static const char *zfs_vdev_scheduler = "unused";
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
    param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
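
/*
 * For example, a stale /etc/modprobe.d/zfs.conf line such as
 * "options zfs zfs_vdev_scheduler=none" (value illustrative) still
 * loads cleanly; only the informational message above is printed.
 */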

int
param_set_min_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val < ASHIFT_MIN || val > zfs_vdev_max_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}

int
param_set_max_auto_ashift(const char *buf, zfs_kernel_param_t *kp)
{
	uint64_t val;
	int error;

	error = kstrtoull(buf, 0, &val);
	if (error < 0)
		return (SET_ERROR(error));

	if (val > ASHIFT_MAX || val < zfs_vdev_min_auto_ashift)
		return (SET_ERROR(-EINVAL));

	error = param_set_ulong(buf, kp);
	if (error < 0)
		return (SET_ERROR(error));

	return (0);
}
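
/*
 * These setters back the zfs_vdev_{min,max}_auto_ashift module
 * parameters and keep min <= max. For example (illustrative):
 *
 *   echo 12 > /sys/module/zfs/parameters/zfs_vdev_min_auto_ashift
 *
 * succeeds only while 12 lies within [ASHIFT_MIN,
 * zfs_vdev_max_auto_ashift]; out-of-range writes fail with EINVAL.
 */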