/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

char *zfs_vdev_scheduler = VDEV_SCHEDULER;

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
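
/*
 * The zero-length dr_bio[] member above is a trailing flexible array:
 * vdev_disk_dio_alloc() sizes a single allocation as
 * sizeof(dio_request_t) plus 'bio_count' struct bio pointers.
 */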
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return part->nr_sects;

	/* Otherwise assume the full device capacity */
	return get_capacity(bdev->bd_disk);
}

static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
	printk("ZFS: zio error=%d type=%d offset=%llu size=%llu "
	    "flags=%x delay=%llu\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags, (u_longlong_t)zio->io_delay);
#endif
}

/*
 * Use the Linux 'noop' elevator for zfs managed block devices.  This
 * strikes the ideal balance by allowing the zfs elevator to do all
 * request ordering and prioritization, while still allowing the Linux
 * elevator to do the maximum front/back merging allowed by the
 * physical device.  This yields the largest possible requests for
 * the device with the lowest total overhead.
 *
 * Unfortunately we cannot directly call the elevator_switch() function
 * because it is not exported from the block layer.  This means we have
 * to use the sysfs interface and a user space upcall.  Pools will be
 * automatically imported on module load so we must do this at device
 * open time from the kernel.
 */
#define SET_SCHEDULER_CMD \
	"exec 0</dev/null " \
	"     1>/sys/block/%s/queue/scheduler " \
	"     2>/dev/null; " \
	"echo %s"

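/*
 * For a device named "sda" and elevator "noop" (illustrative values),
 * SET_SCHEDULER_CMD expands to:
 *
 *   exec 0</dev/null 1>/sys/block/sda/queue/scheduler 2>/dev/null; echo noop
 *
 * i.e. stdout is redirected to the device's sysfs scheduler file and the
 * requested elevator name is echoed into it.
 */
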
static int
vdev_elevator_switch(vdev_t *v, char *elevator)
{
	vdev_disk_t *vd = v->vdev_tsd;
	struct block_device *bdev = vd->vd_bdev;
	struct request_queue *q = bdev_get_queue(bdev);
	char *device = bdev->bd_disk->disk_name;
	char *argv[] = { "/bin/sh", "-c", NULL, NULL };
	char *envp[] = { NULL };
	int error;

	/* Skip devices which are not whole disks (partitions) */
	if (!v->vdev_wholedisk)
		return (0);

	/* Skip devices without schedulers (loop, ram, dm, etc) */
	if (!q->elevator || !blk_queue_stackable(q))
		return (0);

	/* Leave existing scheduler when set to "none" */
	if (!strncmp(elevator, "none", 4) && (strlen(elevator) == 4))
		return (0);

	argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
	error = call_usermodehelper(argv[0], argv, envp, 1);
	if (error)
		printk("ZFS: Unable to set \"%s\" scheduler for %s (%s): %d\n",
		    elevator, v->vdev_path, device, error);

	strfree(argv[2]);

	return (error);
}

static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
	struct block_device *bdev;
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
	if (vd == NULL)
		return ENOMEM;

	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternately you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
	mode = spa_mode(v->vdev_spa);
	bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}

	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;
	block_size = vdev_bdev_block_size(bdev);

	/* We think the wholedisk property should always be set when this
	 * function is called.  ASSERT here so if any legitimate cases exist
	 * where it's not set, we'll find them during debugging.  If we never
	 * hit the ASSERT, this and the following conditional statement can be
	 * removed. */
	ASSERT3S(v->vdev_wholedisk, !=, -1ULL);

	/* The wholedisk property was initialized to -1 in vdev_alloc() if it
	 * was unspecified.  In that case, check if this is a whole device.
	 * When bdev->bd_contains == bdev we have a whole device and not simply
	 * a partition. */
	if (v->vdev_wholedisk == -1ULL)
		v->vdev_wholedisk = (bdev->bd_contains == bdev);

	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;

	/* Physical volume size in bytes */
	*psize = bdev_capacity(bdev) * block_size;

	/* Based on the minimum sector size set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

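	/*
	 * Worked example (SPA_MINBLOCKSIZE is 512 in upstream ZFS): a
	 * 512-byte sector device gives highbit(512) == 10, so ashift == 9
	 * and 1 << ashift == 512; a 4096-byte sector device gives
	 * highbit(4096) == 13, so ashift == 12.
	 */
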
	/* Try to set the io scheduler elevator algorithm */
	(void) vdev_elevator_switch(v, zfs_vdev_scheduler);

	return 0;
}

231 | ||
232 | static void | |
233 | vdev_disk_close(vdev_t *v) | |
234 | { | |
235 | vdev_disk_t *vd = v->vdev_tsd; | |
236 | ||
237 | if (vd == NULL) | |
238 | return; | |
239 | ||
240 | if (vd->vd_bdev != NULL) | |
241 | vdev_bdev_close(vd->vd_bdev, | |
242 | vdev_bdev_mode(spa_mode(v->vdev_spa))); | |
243 | ||
244 | kmem_free(vd, sizeof(vdev_disk_t)); | |
245 | v->vdev_tsd = NULL; | |
246 | } | |

static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}

static int
vdev_disk_dio_is_sync(dio_request_t *dr)
{
#ifdef HAVE_BIO_RW_SYNC
	/* BIO_RW_SYNC preferred interface from 2.6.12-2.6.29 */
	return (dr->dr_rw & (1 << BIO_RW_SYNC));
#else
# ifdef HAVE_BIO_RW_SYNCIO
	/* BIO_RW_SYNCIO preferred interface from 2.6.30-2.6.35 */
	return (dr->dr_rw & (1 << BIO_RW_SYNCIO));
# else
#  ifdef HAVE_REQ_SYNC
	/* REQ_SYNC preferred interface from 2.6.36-2.6.xx */
	return (dr->dr_rw & REQ_SYNC);
#  else
#   error "Unable to determine bio sync flag"
#  endif /* HAVE_REQ_SYNC */
# endif /* HAVE_BIO_RW_SYNCIO */
#endif /* HAVE_BIO_RW_SYNC */
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interrupt is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_delay = jiffies_to_msecs(
			    jiffies_64 - zio->io_delay);
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}

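/*
 * Reference counting summary: __vdev_disk_physio() takes one reference
 * per attached bio plus an extra reference held across submit_bio().
 * Each completion callback drops one reference via vdev_disk_dio_put(),
 * and whichever put drives dr_ref to zero frees the dio_request and
 * interrupts the parent zio.
 */
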
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;

	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up the synchronous waiter if this is the last outstanding bio */
	if ((rc == 1) && vdev_disk_dio_is_sync(dr))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}

static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
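
/*
 * Worked example (assuming 4 KiB pages, PAGE_SHIFT == 12): a 512-byte
 * buffer at address 0x1f00 ends at 0x2100 and crosses the page boundary
 * at 0x2000, so bio_nr_pages() returns ((0x2100 + 0xfff) >> 12) -
 * (0x1f00 >> 12) == 3 - 1 == 2 pages.
 */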

static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (bio_size <= 0)
			break;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr += size;
		bio_size -= size;
		offset = 0;
	}

	return bio_size;
}

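/*
 * bio_map() returns the number of bytes it could not attach to the bio;
 * zero means the entire buffer was mapped.  __vdev_disk_physio() below
 * feeds any residual bytes into the next bio in the chain.
 */
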
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0, block_size;

	ASSERT3U(kbuf_offset + kbuf_size, <=, bdev->bd_inode->i_size);

retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;

	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);

	dr->dr_zio = zio;
	dr->dr_rw = flags;
	block_size = vdev_bdev_block_size(bdev);

	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
	bio_ptr = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {

		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;

		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			printk("WARNING: Resized bio's/dio to %d\n", bio_count);
			goto retry;
		}

		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}

		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset / block_size;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;

		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}

	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);
	if (zio)
		zio->io_delay = jiffies_64;

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);

	/*
	 * On synchronous blocking requests we wait for all of the bio
	 * completion callbacks to run.  We will be woken when the last
	 * callback runs for this dio.  We are responsible for putting the
	 * last dio_request reference, which will in turn put back the last
	 * bio references.  The only synchronous consumer is
	 * vdev_disk_read_rootlabel(); all other IO originating from
	 * vdev_disk_io_start() is asynchronous.
	 */
	if (vdev_disk_dio_is_sync(dr)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void)vdev_disk_dio_put(dr);

	return error;
}

int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}

/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_delay = jiffies_to_msecs(jiffies_64 - zio->io_delay);
	zio->io_error = -rc;
	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}
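/*
 * Cache flushing here relies on an empty barrier bio: a zero-length
 * WRITE_BARRIER request asks the device to drain its volatile write
 * cache.  Devices which return -EOPNOTSUPP cause the completion handler
 * above to set vdev_nowritecache, so no further flushes are attempted.
 */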
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	zio->io_delay = jiffies_64;
	submit_bio(WRITE_BARRIER, bio);

	return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */

static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:

		if (!vdev_readable(v)) {
			zio->io_error = ENXIO;
			return ZIO_PIPELINE_CONTINUE;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:

			if (zfs_nocacheflush)
				break;

			if (v->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;

			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;

		default:
			zio->io_error = ENOTSUP;
		}

		return ZIO_PIPELINE_CONTINUE;

	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = ENOTSUP;
		return ZIO_PIPELINE_CONTINUE;
	}

	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}

static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}

static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* We must have a pathname, and it must be absolute. */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
		return;

	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;

	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}

static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}

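/*
 * The slots below follow the vdev_ops_t layout in sys/vdev_impl.h:
 * open, close, asize, io_start, io_done, state_change (unused here,
 * hence NULL), hold, rele, then the vdev type name and the leaf flag.
 */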
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);

	s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}

	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
	label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);

	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;

		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}

		break;
	}

	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return 0;
}

module_param(zfs_vdev_scheduler, charp, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
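
/*
 * Typical usage (illustrative): the scheduler may be chosen at module
 * load time, e.g. "modprobe zfs zfs_vdev_scheduler=noop", or written
 * later through /sys/module/zfs/parameters/zfs_vdev_scheduler; the
 * value is consulted when a vdev is opened.
 */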