/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <linux/mod_compat.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	zio_t			*dr_zio;	/* Parent ZIO */
	atomic_t		dr_ref;		/* References */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
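
/*
 * Note that dr_bio[] is a zero-length (flexible) array member; the
 * dio_request_t and its bio pointer table are allocated together by
 * vdev_disk_dio_alloc() below as a single kmem_zalloc() of
 * sizeof (dio_request_t) + sizeof (struct bio *) * bio_count bytes.
 */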

#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return (mode);
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return (part->nr_sects << 9);

	/* Otherwise assume the full device capacity */
	return (get_capacity(bdev->bd_disk) << 9);
}
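
/*
 * Both branches above report the capacity in bytes; nr_sects and
 * get_capacity() count 512-byte sectors, so the << 9 converts sectors to
 * bytes (for example, 2097152 sectors << 9 = 1 GiB).
 */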

static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
	printk(KERN_WARNING "ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
#endif
}

/*
 * Use the Linux 'noop' elevator for zfs managed block devices. This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device. This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
static void
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct request_queue *q;
	char *device;
	int error;

	for (int c = 0; c < v->vdev_children; c++)
		vdev_elevator_switch(v->vdev_child[c], elevator);

	if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
		return;

	q = bdev_get_queue(vd->vd_bdev);
	device = vd->vd_bdev->bd_disk->disk_name;

	/*
	 * Skip devices which are not whole disks (partitions).
	 * Device-mapper devices are excepted since they may be whole
	 * disks despite the vdev_wholedisk flag, in which case we can
	 * and should switch the elevator. If the device-mapper device
	 * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
	 * "Skip devices without schedulers" check below will fail.
	 */
	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
		return;

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return;

	/* Leave existing scheduler when set to "none" */
	if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
		return;

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
	/*
	 * For pre-2.6.36 kernels elevator_change() is not available.
	 * Therefore we fall back to using a usermodehelper to echo the
	 * elevator into sysfs; This requires /bin/echo and sysfs to be
	 * mounted which may not be true early in the boot process.
	 */
#define	SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };

	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
	error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
	strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error)
		printk(KERN_NOTICE "ZFS: Unable to set \"%s\" scheduler"
		    " for %s (%s): %d\n", elevator, v->vdev_path, device,
		    error);
}
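
/*
 * For reference, with device "sda" and elevator "noop" the fallback
 * SET_SCHEDULER_CMD above expands to approximately:
 *
 *	exec 0</dev/null 1>/sys/block/sda/queue/scheduler 2>/dev/null; echo noop
 *
 * i.e. the requested elevator name is simply written into the device's
 * scheduler attribute in sysfs.
 */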

/*
 * Expanding a whole disk vdev involves invoking BLKRRPART on the
 * whole disk device. This poses a problem, because BLKRRPART will
 * return EBUSY if one of the disk's partitions is open. That's why
 * we have to do it here, just before opening the data partition.
 * Unfortunately, BLKRRPART works by dropping all partitions and
 * recreating them, which means that for a short time window, all
 * /dev/sdxN device files disappear (until udev recreates them).
 * This means two things:
 *  - When we open the data partition just after a BLKRRPART, we
 *    can't do it using the normal device file path because of the
 *    obvious race condition with udev. Instead, we use reliable
 *    kernel APIs to get a handle to the new partition device from
 *    the whole disk device.
 *  - Because vdev_disk_open() initially needs to find the device
 *    using its path, multiple vdev_disk_open() invocations in
 *    short succession on the same disk with BLKRRPARTs in the
 *    middle have a high probability of failure (because of the
 *    race condition with udev). A typical situation where this
 *    might happen is when the zpool userspace tool does a
 *    TRYIMPORT immediately followed by an IMPORT. For this
 *    reason, we only invoke BLKRRPART in the module when strictly
 *    necessary (zpool online -e case), and rely on userspace to
 *    do it when possible.
 */
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
	struct block_device *bdev, *result = ERR_PTR(-ENXIO);
	struct gendisk *disk;
	int error, partno;

	bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);
	if (IS_ERR(bdev))
		return (bdev);

	disk = get_gendisk(bdev->bd_dev, &partno);
	vdev_bdev_close(bdev, vdev_bdev_mode(mode));

	if (disk) {
		bdev = bdget(disk_devt(disk));
		if (bdev) {
			error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
			if (error == 0)
				error = ioctl_by_bdev(bdev, BLKRRPART, 0);
			vdev_bdev_close(bdev, vdev_bdev_mode(mode));
		}

		bdev = bdget_disk(disk, partno);
		if (bdev) {
			error = blkdev_get(bdev,
			    vdev_bdev_mode(mode) | FMODE_EXCL, vd);
			if (error == 0)
				result = bdev;
		}
		put_disk(disk);
	}

	return (result);
#else
	return (ERR_PTR(-EOPNOTSUPP));
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}
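
/*
 * vdev_disk_rrpart() returns either a block_device handle opened with
 * FMODE_EXCL for the data partition or an ERR_PTR() value. Callers such as
 * vdev_disk_open() check the result with IS_ERR() and fall back to opening
 * the configured path directly when the partition re-read fails or is not
 * supported on this kernel.
 */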

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev = ERR_PTR(-ENXIO);
	vdev_disk_t *vd;
	int count = 0, mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (SET_ERROR(EINVAL));
	}

	/*
	 * Reopen the device if it's not currently open. Otherwise,
	 * just update the physical size of the device.
	 */
	if (v->vdev_tsd != NULL) {
		ASSERT(v->vdev_reopening);
		vd = v->vdev_tsd;
		goto skip_open;
	}

	vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
	if (vd == NULL)
		return (SET_ERROR(ENOMEM));

	/*
	 * Devices are always opened by the path provided at configuration
	 * time. This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue. If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved. This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit. It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 *
	 * The specified paths may be briefly removed and recreated in
	 * response to udev events. This should be exceptionally unlikely
	 * because the zpool command makes every effort to verify these paths
	 * have already settled prior to reaching this point. Therefore,
	 * an ENOENT failure at this point is highly likely to be transient
	 * and it is reasonable to sleep and retry before giving up. In
	 * practice delays have been observed to be on the order of 100ms.
	 */
	mode = spa_mode(v->vdev_spa);
	if (v->vdev_wholedisk && v->vdev_expanding)
		bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);

	while (IS_ERR(bdev) && count < 50) {
		bdev = vdev_bdev_open(v->vdev_path,
		    vdev_bdev_mode(mode), zfs_vdev_holder);
		if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
			msleep(10);
			count++;
		} else if (IS_ERR(bdev)) {
			break;
		}
	}

	if (IS_ERR(bdev)) {
		dprintf("failed open v->vdev_path=%s, error=%d count=%d\n",
		    v->vdev_path, -PTR_ERR(bdev), count);
		kmem_free(vd, sizeof (vdev_disk_t));
		return (SET_ERROR(-PTR_ERR(bdev)));
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;

skip_open:
	/* Determine the physical block size */
	block_size = vdev_bdev_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Inform the ZIO pipeline that we are non-rotational */
	v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));

	/* Physical volume size in bytes */
	*psize = bdev_capacity(vd->vd_bdev);

	/* TODO: report possible expansion size */
	*max_psize = *psize;

	/* Based on the minimum sector size set the block size */
	*ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
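	/*
	 * For example, a device reporting a 4096 byte physical block size
	 * yields highbit64(4096) - 1 = 12, i.e. ashift=12, while a 512 byte
	 * device is clamped to SPA_MINBLOCKSIZE (512) and yields ashift=9.
	 */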

	/* Try to set the io scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return (0);
}

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof (vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof (dio_request_t) +
	    sizeof (struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return (dr);
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof (dio_request_t) +
	    sizeof (struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_delay_interrupt() is called only once with the
	 * correct zio.
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);

			zio_delay_interrupt(zio);
		}
	}

	return (rc);
}
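
/*
 * Reference counting summary: __vdev_disk_physio() takes one reference per
 * bio it attaches (dropped by vdev_disk_physio_completion() when that bio
 * completes) plus one extra reference held across submission. Whichever
 * vdev_disk_dio_put() call drops the count to zero frees the dio_request
 * and completes the parent zio exactly once.
 */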

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
		dr->dr_error = BIO_END_IO_ERROR(bio);
#else
		if (error)
			dr->dr_error = -(error);
		else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
			dr->dr_error = EIO;
#endif
	}

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);
}

static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (is_vmalloc_addr(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		/*
		 * Some network related block devices use tcp_sendpage, which
		 * doesn't behave well when given a 0-count page; this is a
		 * safety net to catch them.
		 */
		ASSERT3S(page_count(page), >, 0);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return (bio_size);
}
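
/*
 * bio_map() returns the number of bytes it could not attach to the bio; a
 * return of zero means the entire buffer was mapped. A non-zero residual
 * causes __vdev_disk_physio() to construct an additional bio for the
 * remainder of the buffer.
 */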

static unsigned int
bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
{
	if (abd_is_linear(abd))
		return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));

	return (abd_scatter_bio_map_off(bio, abd, size, off));
}

static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
	submit_bio(bio);
#else
	submit_bio(0, bio);
#endif
}

#ifndef HAVE_BIO_SET_DEV
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
	bio->bi_bdev = bdev;
}
#endif /* !HAVE_BIO_SET_DEV */

static inline void
vdev_submit_bio(struct bio *bio)
{
#ifdef HAVE_CURRENT_BIO_TAIL
	struct bio **bio_tail = current->bio_tail;
	current->bio_tail = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_tail = bio_tail;
#else
	struct bio_list *bio_list = current->bio_list;
	current->bio_list = NULL;
	vdev_submit_bio_impl(bio);
	current->bio_list = bio_list;
#endif
}
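
/*
 * Clearing current->bio_list (or current->bio_tail on older kernels) around
 * submit_bio() makes the call look like a top-level submission rather than a
 * recursive one, so the bio is dispatched immediately instead of being
 * appended to the task's in-progress bio list; presumably this avoids stalls
 * when ZFS sits on top of other stacked block devices.
 */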

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
    size_t io_size, uint64_t io_offset, int rw, int flags)
{
	dio_request_t *dr;
	uint64_t abd_offset;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	struct blk_plug plug;
#endif

	ASSERT(zio != NULL);
	ASSERT3U(io_offset + io_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return (SET_ERROR(ENOMEM));

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete. Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */

	abd_offset = 0;
	bio_offset = io_offset;
	bio_size = io_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		/* bio_alloc() with __GFP_WAIT never returns NULL */
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
		    BIO_MAX_PAGES));
		if (unlikely(dr->dr_bio[i] == NULL)) {
			vdev_disk_dio_free(dr);
			return (SET_ERROR(ENOMEM));
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		bio_set_dev(dr->dr_bio[i], bdev);
		BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		bio_set_op_attrs(dr->dr_bio[i], rw, flags);

		/* Remaining size is returned to become the new size */
		bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
		    bio_size, abd_offset);

		/* Advance in buffer and construct another bio if needed */
		abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
		bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
	}

	/* Extra reference to protect dio_request during vdev_submit_bio */
	vdev_disk_dio_get(dr);

#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	if (dr->dr_bio_count > 1)
		blk_start_plug(&plug);
#endif

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			vdev_submit_bio(dr->dr_bio[i]);

#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
	if (dr->dr_bio_count > 1)
		blk_finish_plug(&plug);
#endif

	(void) vdev_disk_dio_put(dr);

	return (error);
}
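
/*
 * Sizing sketch (assuming 4 KiB pages and the common BIO_MAX_PAGES value of
 * 256): a single bio can map at most 256 * 4 KiB = 1 MiB, so a 4 MiB zio is
 * split across four bios. Only when more than the initial 16 bios are needed
 * does the retry path above double bio_count and rebuild the dio_request.
 */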

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
	zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
	zio->io_error = BIO_END_IO_ERROR(bio);
#else
	zio->io_error = -error;
#endif

	if (zio->io_error && (zio->io_error == EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);
}
653 | ||
654 | static int | |
655 | vdev_disk_io_flush(struct block_device *bdev, zio_t *zio) | |
656 | { | |
657 | struct request_queue *q; | |
658 | struct bio *bio; | |
659 | ||
660 | q = bdev_get_queue(bdev); | |
661 | if (!q) | |
ecb2b7dc | 662 | return (SET_ERROR(ENXIO)); |
60101509 | 663 | |
abc41ac7 | 664 | bio = bio_alloc(GFP_NOIO, 0); |
29b763cd IH |
665 | /* bio_alloc() with __GFP_WAIT never returns NULL */ |
666 | if (unlikely(bio == NULL)) | |
ecb2b7dc | 667 | return (SET_ERROR(ENOMEM)); |
60101509 BB |
668 | |
669 | bio->bi_end_io = vdev_disk_io_flush_completion; | |
670 | bio->bi_private = zio; | |
787acae0 | 671 | bio_set_dev(bio, bdev); |
a5e046ea | 672 | bio_set_flush(bio); |
3b86aeb2 | 673 | vdev_submit_bio(bio); |
cecb7487 | 674 | invalidate_bdev(bdev); |
60101509 | 675 | |
d1d7e268 | 676 | return (0); |
60101509 | 677 | } |
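
/*
 * The cache flush is issued as a zero-length bio whose flush flag is set by
 * the bio_set_flush() wrapper appropriate for the running kernel. Completion
 * is asynchronous: vdev_disk_io_flush_completion() records the result in the
 * zio, and an EOPNOTSUPP reply marks the vdev as having no write cache to
 * flush.
 */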

static void
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int rw, flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return;

			zio->io_error = error;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		zio_execute(zio);
		return;
	case ZIO_TYPE_WRITE:
		rw = WRITE;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
		flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
		flags = REQ_UNPLUG;
#else
		flags = 0;
#endif
		break;

	case ZIO_TYPE_READ:
		rw = READ;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
		flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
		flags = REQ_UNPLUG;
#else
		flags = 0;
#endif
		break;

	default:
		zio->io_error = SET_ERROR(ENOTSUP);
		zio_interrupt(zio);
		return;
	}

	zio->io_target_timestamp = zio_handle_io_delay(zio);
	error = __vdev_disk_physio(vd->vd_bdev, zio,
	    zio->io_size, zio->io_offset, rw, flags);
	if (error) {
		zio->io_error = error;
		zio_interrupt(zio);
		return;
	}
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media. If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
	spa_t *spa = NULL;
	char *p;

	if (val == NULL)
		return (SET_ERROR(-EINVAL));

	if ((p = strchr(val, '\n')) != NULL)
		*p = '\0';

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa_state(spa) != POOL_STATE_ACTIVE ||
		    !spa_writeable(spa) || spa_suspended(spa))
			continue;

		spa_open_ref(spa, FTAG);
		mutex_exit(&spa_namespace_lock);
		vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
		mutex_enter(&spa_namespace_lock);
		spa_close(spa, FTAG);
	}
	mutex_exit(&spa_namespace_lock);

	return (param_set_charp(val, kp));
}

vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	NULL,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
	param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
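
/*
 * Example usage (a sketch; the sysfs path assumes the parameter is built
 * into the zfs module): the scheduler can be chosen at module load time or
 * changed at runtime, in which case the write invokes
 * param_set_vdev_scheduler() above and the new elevator is re-applied to
 * every active, writable pool.
 *
 *	# modprobe zfs zfs_vdev_scheduler=noop
 *	# echo noop > /sys/module/zfs/parameters/zfs_vdev_scheduler
 */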