/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
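
/*
 * Lifecycle note (describing the logic below): dr_ref counts the
 * attached bio's plus one extra reference held across submit_bio().
 * Each bio completion drops a reference via vdev_disk_dio_put(), and
 * the dio_request, including its trailing dr_bio[] array, is freed
 * when the count reaches zero.
 */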

#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

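/*
 * Return the device capacity in bytes.  Linux tracks partition and
 * whole-disk sizes in 512-byte sectors, hence the shift by 9 below;
 * for example, a 2097152-sector partition is 2097152 << 9 bytes,
 * i.e. exactly 1 GiB.
 */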
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return (part->nr_sects << 9);

	/* Otherwise assume the full device capacity */
	return (get_capacity(bdev->bd_disk) << 9);
}

static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags, (u_longlong_t)zio->io_delay);
#endif
}

/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 */
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = vd->vd_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char *device = bdev->bd_disk->disk_name;
	int error;

	/*
	 * Skip devices which are not whole disks (partitions).
	 * Device-mapper devices are excepted since they may be whole
	 * disks despite the vdev_wholedisk flag, in which case we can
	 * and should switch the elevator.  If the device-mapper device
	 * does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
	 * "Skip devices without schedulers" check below will fail.
	 */
	if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
		return (0);

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return (0);

	/* Leave existing scheduler when set to "none" */
	if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
		return (0);

#ifdef HAVE_ELEVATOR_CHANGE
	error = elevator_change(q, elevator);
#else
	/*
	 * For pre-2.6.36 kernels elevator_change() is not available.
	 * Therefore we fall back to using a usermodehelper to echo the
	 * elevator into sysfs; this requires /bin/echo and sysfs to be
	 * mounted, which may not be true early in the boot process.
	 */
# define SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

	{
		char *argv[] = { "/bin/sh", "-c", NULL, NULL };
		char *envp[] = { NULL };

		argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
		error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
		strfree(argv[2]);
	}
#endif /* HAVE_ELEVATOR_CHANGE */
	if (error)
		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);

	return (error);
}
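
/*
 * Note: the elevator name comes from the zfs_vdev_scheduler module
 * option declared at the bottom of this file.  It may be set at module
 * load time, e.g. with "options zfs zfs_vdev_scheduler=noop" in a
 * modprobe.d(5) configuration file, or adjusted at runtime since the
 * parameter is writable (mode 0644).
 */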

/*
 * Expanding a whole disk vdev involves invoking BLKRRPART on the
 * whole disk device.  This poses a problem, because BLKRRPART will
 * return EBUSY if one of the disk's partitions is open.  That's why
 * we have to do it here, just before opening the data partition.
 * Unfortunately, BLKRRPART works by dropping all partitions and
 * recreating them, which means that for a short time window, all
 * /dev/sdxN device files disappear (until udev recreates them).
 * This means two things:
 *  - When we open the data partition just after a BLKRRPART, we
 *    can't do it using the normal device file path because of the
 *    obvious race condition with udev.  Instead, we use reliable
 *    kernel APIs to get a handle to the new partition device from
 *    the whole disk device.
 *  - Because vdev_disk_open() initially needs to find the device
 *    using its path, multiple vdev_disk_open() invocations in
 *    short succession on the same disk with BLKRRPARTs in the
 *    middle have a high probability of failure (because of the
 *    race condition with udev).  A typical situation where this
 *    might happen is when the zpool userspace tool does a
 *    TRYIMPORT immediately followed by an IMPORT.  For this
 *    reason, we only invoke BLKRRPART in the module when strictly
 *    necessary (zpool online -e case), and rely on userspace to
 *    do it when possible.
 */
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
	struct block_device *bdev, *result = ERR_PTR(-ENXIO);
	struct gendisk *disk;
	int error, partno;

	bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);
	if (IS_ERR(bdev))
		return bdev;

	disk = get_gendisk(bdev->bd_dev, &partno);
	vdev_bdev_close(bdev, vdev_bdev_mode(mode));

	if (disk) {
		bdev = bdget(disk_devt(disk));
		if (bdev) {
			error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
			if (error == 0)
				error = ioctl_by_bdev(bdev, BLKRRPART, 0);
			vdev_bdev_close(bdev, vdev_bdev_mode(mode));
		}

		bdev = bdget_disk(disk, partno);
		if (bdev) {
			error = blkdev_get(bdev,
			    vdev_bdev_mode(mode) | FMODE_EXCL, vd);
			if (error == 0)
				result = bdev;
		}
		put_disk(disk);
	}

	return result;
#else
	return ERR_PTR(-EOPNOTSUPP);
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	struct block_device *bdev = ERR_PTR(-ENXIO);
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (v->vdev_tsd != NULL) {
		ASSERT(v->vdev_reopening);
		vd = v->vdev_tsd;
		goto skip_open;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_PUSHPAGE);
	if (vd == NULL)
		return ENOMEM;

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path, then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternatively, you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
	mode = spa_mode(v->vdev_spa);
	if (v->vdev_wholedisk && v->vdev_expanding)
		bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
	if (IS_ERR(bdev))
		bdev = vdev_bdev_open(v->vdev_path,
		    vdev_bdev_mode(mode), zfs_vdev_holder);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;

skip_open:
	/* Determine the physical block size */
	block_size = vdev_bdev_block_size(vd->vd_bdev);

	/* Clear the nowritecache bit, which causes vdev_reopen() to try again */
	v->vdev_nowritecache = B_FALSE;

	/* Physical volume size in bytes */
	*psize = bdev_capacity(vd->vd_bdev);

	/* TODO: report possible expansion size */
	*max_psize = *psize;

	/* Based on the minimum sector size set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
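	/* e.g. 512-byte sectors yield ashift = 9; 4096-byte sectors, ashift = 12 */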

	/* Try to set the io scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return 0;
}

static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (v->vdev_reopening || vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof(vdev_disk_t));
	v->vdev_tsd = NULL;
}

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_PUSHPAGE);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}

static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
	/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
	return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
	/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
	return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
	/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
	return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interrupt() is called only once with the correct zio.
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_delay = jiffies_64 - zio->io_delay;
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}
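
/*
 * Note: the decremented reference count is returned above so the bio
 * completion callback can detect when only the synchronous waiter's
 * reference remains (rc == 1) and signal dr_comp accordingly.
 */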

BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up synchronous waiter if this is the last outstanding bio */
	if ((rc == 1) && vdev_disk_dio_is_sync(dr))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}

static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
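
/*
 * Worked example (4 KiB pages, PAGE_SHIFT == 12): a buffer starting
 * 512 bytes into a page with bio_size = 8192 ends 8704 bytes in, so
 * the count is ((512 + 8192 + 4095) >> 12) - (512 >> 12) = 3 pages.
 */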

static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return bio_size;
}
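
/*
 * Note: bio_map() returns the number of bytes it could not map.  A
 * non-zero result means the bio filled up (bi_max_vecs reached) and
 * the caller must construct another bio for the remainder, as the
 * loop in __vdev_disk_physio() below does.
 */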

static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0;

	ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;
	dr->dr_rw = flags;

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
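	/*
	 * For example, with the initial bio_count of 16 above, an IO
	 * that turns out to need 17 bio's frees this dio and retries
	 * with room for 32.
	 */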
	bio_ptr = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			goto retry;
		}

		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset >> 9;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;

		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}

	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);
	if (zio)
		zio->io_delay = jiffies_64;

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);

	/*
	 * On synchronous blocking requests we wait for all bio completion
	 * callbacks to run.  We will be woken when the last callback runs
	 * for this dio.  We are responsible for putting the last dio_request
	 * reference, which in turn puts back the last bio references.  The
	 * only synchronous consumer is vdev_disk_read_rootlabel(); all other
	 * IO originating from vdev_disk_io_start() is asynchronous.
	 */
	if (vdev_disk_dio_is_sync(dr)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void) vdev_disk_dio_put(dr);

	return error;
}

int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}

BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_delay = jiffies_64 - zio->io_delay;
	zio->io_error = -rc;
	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_NOIO, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	zio->io_delay = jiffies_64;
	submit_bio(VDEV_WRITE_FLUSH_FUA, bio);

	return 0;
}
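
/*
 * Note: the zero-length bio submitted above with VDEV_WRITE_FLUSH_FUA
 * (a compatibility wrapper around the kernel's flush/barrier request
 * flags) asks the device to empty its volatile write cache.  Devices
 * which do not support cache flushing complete it with -EOPNOTSUPP,
 * which the completion callback records by setting vdev_nowritecache.
 */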

static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = SET_ERROR(ENXIO);
			return ZIO_PIPELINE_CONTINUE;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = SET_ERROR(ENOTSUP);
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;

			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;

		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		return ZIO_PIPELINE_CONTINUE;

	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = SET_ERROR(ENOTSUP);
		return ZIO_PIPELINE_CONTINUE;
	}

	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), zfs_vdev_holder);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);

	s = bdev_capacity(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}

	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
	label = vmem_alloc(sizeof(vdev_label_t), KM_PUSHPAGE);

	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return 0;
}

module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");