/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 * LLNL-CODE-403049.
 */

#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>

/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
        struct completion       dr_comp;        /* Completion for sync IO */
        atomic_t                dr_ref;         /* References */
        zio_t                   *dr_zio;        /* Parent ZIO */
        int                     dr_rw;          /* Read/Write */
        int                     dr_error;       /* Bio error */
        int                     dr_bio_count;   /* Count of bio's */
        struct bio              *dr_bio[0];     /* Attached bio's */
} dio_request_t;

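/*
 * Translate the spa_mode() FREAD/FWRITE flags into the mode expected by
 * the Linux block device open interface.  Two variants are provided: an
 * fmode_t version for kernels with open_bdev_exclusive() and an int
 * (MS_RDONLY) version for older kernels.
 */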
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
        fmode_t mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if (smode & FREAD)
                mode |= FMODE_READ;

        if (smode & FWRITE)
                mode |= FMODE_WRITE;

        return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
        int mode = 0;

        ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

        if ((smode & FREAD) && !(smode & FWRITE))
                mode = MS_RDONLY;

        return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */

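/*
 * Return the capacity, in sectors, of the partition referenced by the
 * block device, or of the whole device when no partition is involved.
 */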
static uint64_t
bdev_capacity(struct block_device *bdev)
{
        struct hd_struct *part = bdev->bd_part;

        /* The partition capacity referenced by the block device */
        if (part)
                return part->nr_sects;

        /* Otherwise assume the full device capacity */
        return get_capacity(bdev->bd_disk);
}

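/*
 * Log the details of a failed zio to the console.  Compiled out unless
 * ZFS_DEBUG is defined.
 */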
static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
        printk("ZFS: zio error=%d type=%d offset=%llu "
            "size=%llu flags=%x\n", zio->io_error, zio->io_type,
            (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
            zio->io_flags);
#endif
}

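/*
 * Open the block device backing this vdev by the configured path, record
 * whether it refers to a whole disk or a partition, and report the usable
 * size and minimum block size (ashift) back to the caller.
 */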
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
        struct block_device *bdev;
        vdev_disk_t *vd;
        int mode, block_size;

        /* Must have a pathname and it must be absolute. */
        if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
                v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return EINVAL;
        }

        vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
        if (vd == NULL)
                return ENOMEM;

        /*
         * Devices are always opened by the path provided at configuration
         * time.  This means that if the provided path is a udev by-id path
         * then drives may be recabled without an issue.  If the provided
         * path is a udev by-path path then the physical location information
         * will be preserved.  This can be critical for more complicated
         * configurations where drives are located in specific physical
         * locations to maximize the system's tolerance to component failure.
         * Alternatively you can provide your own udev rule to flexibly map
         * the drives as you see fit.  It is not advised that you use the
         * /dev/[hd]d devices which may be reordered due to probing order.
         * Devices in the wrong locations will be detected by the higher
         * level vdev validation.
         */
        mode = spa_mode(v->vdev_spa);
        bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
        if (IS_ERR(bdev)) {
                kmem_free(vd, sizeof(vdev_disk_t));
                return -PTR_ERR(bdev);
        }

        v->vdev_tsd = vd;
        vd->vd_bdev = bdev;
        block_size = vdev_bdev_block_size(bdev);

        /* We think the wholedisk property should always be set when this
         * function is called.  ASSERT here so if any legitimate cases exist
         * where it's not set, we'll find them during debugging.  If we never
         * hit the ASSERT, this and the following conditional statement can be
         * removed. */
        ASSERT3S(v->vdev_wholedisk, !=, -1ULL);

        /* The wholedisk property was initialized to -1 in vdev_alloc() if it
         * was unspecified.  In that case, check if this is a whole device.
         * When bdev->bd_contains == bdev we have a whole device and not simply
         * a partition. */
        if (v->vdev_wholedisk == -1ULL)
                v->vdev_wholedisk = (bdev->bd_contains == bdev);

        /* Clear the nowritecache bit, so vdev_reopen() will try again. */
        v->vdev_nowritecache = B_FALSE;

        /* Physical volume size in bytes */
        *psize = bdev_capacity(bdev) * block_size;

        /* Based on the minimum sector size set the block size */
        *ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

        return 0;
}

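/*
 * Close the block device and free the per-vdev private data.
 */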
static void
vdev_disk_close(vdev_t *v)
{
        vdev_disk_t *vd = v->vdev_tsd;

        if (vd == NULL)
                return;

        if (vd->vd_bdev != NULL)
                vdev_bdev_close(vd->vd_bdev,
                    vdev_bdev_mode(spa_mode(v->vdev_spa)));

        kmem_free(vd, sizeof(vdev_disk_t));
        v->vdev_tsd = NULL;
}

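/*
 * A dio_request tracks all of the bio's issued for a single vdev IO.  The
 * reference count is raised once for every attached bio and once more by
 * the submitter for the duration of submit_bio(); each completion callback
 * and the submitter drop their reference through vdev_disk_dio_put().  When
 * the count reaches zero the request is freed and the parent zio, if any,
 * is completed via zio_interrupt().
 */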
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
        dio_request_t *dr;
        int i;

        dr = kmem_zalloc(sizeof(dio_request_t) +
            sizeof(struct bio *) * bio_count, KM_SLEEP);
        if (dr) {
                init_completion(&dr->dr_comp);
                atomic_set(&dr->dr_ref, 0);
                dr->dr_bio_count = bio_count;
                dr->dr_error = 0;

                for (i = 0; i < dr->dr_bio_count; i++)
                        dr->dr_bio[i] = NULL;
        }

        return dr;
}

static void
vdev_disk_dio_free(dio_request_t *dr)
{
        int i;

        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        bio_put(dr->dr_bio[i]);

        kmem_free(dr, sizeof(dio_request_t) +
            sizeof(struct bio *) * dr->dr_bio_count);
}

static void
vdev_disk_dio_get(dio_request_t *dr)
{
        atomic_inc(&dr->dr_ref);
}

static int
vdev_disk_dio_put(dio_request_t *dr)
{
        int rc = atomic_dec_return(&dr->dr_ref);

        /*
         * Free the dio_request when the last reference is dropped and
         * ensure zio_interrupt() is called only once with the correct zio.
         */
        if (rc == 0) {
                zio_t *zio = dr->dr_zio;
                int error = dr->dr_error;

                vdev_disk_dio_free(dr);

                if (zio) {
                        zio->io_error = error;
                        ASSERT3S(zio->io_error, >=, 0);
                        if (zio->io_error)
                                vdev_disk_error(zio);
                        zio_interrupt(zio);
                }
        }

        return rc;
}

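/*
 * Per-bio completion callback.  Records the first error seen, drops this
 * bio's reference on the dio_request and, for synchronous requests, wakes
 * the waiter once the last outstanding bio has completed.
 */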
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
        dio_request_t *dr = bio->bi_private;
        int rc;

        /* Fatal error but print some useful debugging before asserting */
        if (dr == NULL)
                PANIC("dr == NULL, bio->bi_private == NULL\n"
                    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
                    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
                    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
                    bio->bi_idx, bio->bi_size, bio->bi_end_io,
                    atomic_read(&bio->bi_cnt));

#ifndef HAVE_2ARGS_BIO_END_IO_T
        if (bio->bi_size)
                return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */

        if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
                error = -EIO;

        if (dr->dr_error == 0)
                dr->dr_error = -error;

        /* Drop reference acquired by __vdev_disk_physio */
        rc = vdev_disk_dio_put(dr);

        /* Wake up the synchronous waiter if this is the last outstanding bio */
        if ((rc == 1) && (dr->dr_rw & (1 << DIO_RW_SYNCIO)))
                complete(&dr->dr_comp);

        BIO_END_IO_RETURN(0);
}

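/*
 * bio_nr_pages() returns the number of pages spanned by a kernel buffer,
 * and bio_map() adds those pages (handling both kmalloc'd and vmalloc'd
 * memory) to a bio.  bio_map() returns the residual size which did not
 * fit in the bio; a non-zero return means another bio is required.
 */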
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
        return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
            PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}

static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
        unsigned int offset, size, i;
        struct page *page;

        offset = offset_in_page(bio_ptr);
        for (i = 0; i < bio->bi_max_vecs; i++) {
                size = PAGE_SIZE - offset;

                if (bio_size <= 0)
                        break;

                if (size > bio_size)
                        size = bio_size;

                if (kmem_virt(bio_ptr))
                        page = vmalloc_to_page(bio_ptr);
                else
                        page = virt_to_page(bio_ptr);

                if (bio_add_page(bio, page, size, offset) != size)
                        break;

                bio_ptr += size;
                bio_size -= size;
                offset = 0;
        }

        return bio_size;
}

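/*
 * Issue a physical IO by splitting the kernel buffer into as many bio's as
 * the request queue requires and submitting them.  For synchronous
 * (DIO_RW_SYNCIO) requests the function blocks until every bio completes
 * and returns the first error observed; asynchronous requests complete the
 * parent zio from the bio completion callback instead.
 */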
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
        dio_request_t *dr;
        caddr_t bio_ptr;
        uint64_t bio_offset;
        int bio_size, bio_count = 16;
        int i = 0, error = 0, block_size;

retry:
        dr = vdev_disk_dio_alloc(bio_count);
        if (dr == NULL)
                return ENOMEM;

        if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
                bio_set_flags_failfast(bdev, &flags);

        dr->dr_zio = zio;
        dr->dr_rw = flags;
        block_size = vdev_bdev_block_size(bdev);

        /*
         * When the IO size exceeds the maximum bio size for the request
         * queue we are forced to break the IO into multiple bio's and wait
         * for them all to complete.  Ideally, all pool users will set
         * their volume block size to match the maximum request size and
         * the common case will be one bio per vdev IO request.
         */
        bio_ptr = kbuf_ptr;
        bio_offset = kbuf_offset;
        bio_size = kbuf_size;
        for (i = 0; i <= dr->dr_bio_count; i++) {

                /* Finished constructing bio's for the given buffer */
                if (bio_size <= 0)
                        break;

                /*
                 * By default only 'bio_count' bio's per dio are allowed.
                 * However, if we find ourselves in a situation where more
                 * are needed we allocate a larger dio and warn the user.
                 */
                if (dr->dr_bio_count == i) {
                        vdev_disk_dio_free(dr);
                        bio_count *= 2;
                        printk("WARNING: Resized bio's/dio to %d\n", bio_count);
                        goto retry;
                }

                dr->dr_bio[i] = bio_alloc(GFP_NOIO,
                    bio_nr_pages(bio_ptr, bio_size));
                if (dr->dr_bio[i] == NULL) {
                        vdev_disk_dio_free(dr);
                        return ENOMEM;
                }

                /* Matching put called by vdev_disk_physio_completion */
                vdev_disk_dio_get(dr);

                dr->dr_bio[i]->bi_bdev = bdev;
                dr->dr_bio[i]->bi_sector = bio_offset / block_size;
                dr->dr_bio[i]->bi_rw = dr->dr_rw;
                dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
                dr->dr_bio[i]->bi_private = dr;

                /* Remaining size is returned to become the new size */
                bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

                /* Advance in buffer and construct another bio if needed */
                bio_ptr += dr->dr_bio[i]->bi_size;
                bio_offset += dr->dr_bio[i]->bi_size;
        }

        /* Extra reference to protect dio_request during submit_bio */
        vdev_disk_dio_get(dr);

        /* Submit all bio's associated with this dio */
        for (i = 0; i < dr->dr_bio_count; i++)
                if (dr->dr_bio[i])
                        submit_bio(dr->dr_rw, dr->dr_bio[i]);

        /*
         * On synchronous blocking requests we wait for all of the bio
         * completion callbacks to run.  We will be woken when the last
         * callback runs for this dio.  We are responsible for putting the
         * last dio_request reference, which in turn puts back the last bio
         * references.  The only synchronous consumer is
         * vdev_disk_read_rootlabel(); all other IO originating from
         * vdev_disk_io_start() is asynchronous.
         */
        if (dr->dr_rw & (1 << DIO_RW_SYNCIO)) {
                wait_for_completion(&dr->dr_comp);
                error = dr->dr_error;
                ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
        }

        (void)vdev_disk_dio_put(dr);

        return error;
}

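/*
 * Convenience wrapper for synchronous physical IO against a block device
 * which is not associated with a zio; used by vdev_disk_read_rootlabel()
 * below.
 */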
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
        bio_set_flags_failfast(bdev, &flags);
        return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}

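/*
 * Flush the disk write cache by submitting an empty barrier bio.  On
 * kernels without empty barrier support the flush is reported as
 * unsupported; likewise, the completion handler disables future cache
 * flushes for the vdev when the device returns EOPNOTSUPP.
 */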
/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
        zio_t *zio = bio->bi_private;

        zio->io_error = -rc;
        if (rc && (rc == -EOPNOTSUPP))
                zio->io_vd->vdev_nowritecache = B_TRUE;

        bio_put(bio);
        ASSERT3S(zio->io_error, >=, 0);
        if (zio->io_error)
                vdev_disk_error(zio);
        zio_interrupt(zio);

        BIO_END_IO_RETURN(0);
}

static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        struct request_queue *q;
        struct bio *bio;

        q = bdev_get_queue(bdev);
        if (!q)
                return ENXIO;

        bio = bio_alloc(GFP_KERNEL, 0);
        if (!bio)
                return ENOMEM;

        bio->bi_end_io = vdev_disk_io_flush_completion;
        bio->bi_private = zio;
        bio->bi_bdev = bdev;
        submit_bio(WRITE_BARRIER, bio);

        return 0;
}
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
        return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */

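/*
 * Start an IO for the given zio: DKIOCFLUSHWRITECACHE ioctls are turned
 * into a cache flush, reads and writes are issued asynchronously through
 * __vdev_disk_physio(), and everything else is rejected as unsupported.
 */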
static int
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *v = zio->io_vd;
        vdev_disk_t *vd = v->vdev_tsd;
        int flags, error;

        switch (zio->io_type) {
        case ZIO_TYPE_IOCTL:

                if (!vdev_readable(v)) {
                        zio->io_error = ENXIO;
                        return ZIO_PIPELINE_CONTINUE;
                }

                switch (zio->io_cmd) {
                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (v->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }

                        error = vdev_disk_io_flush(vd->vd_bdev, zio);
                        if (error == 0)
                                return ZIO_PIPELINE_STOP;

                        zio->io_error = error;
                        if (error == ENOTSUP)
                                v->vdev_nowritecache = B_TRUE;

                        break;

                default:
                        zio->io_error = ENOTSUP;
                }

                return ZIO_PIPELINE_CONTINUE;

        case ZIO_TYPE_WRITE:
                flags = WRITE;
                break;

        case ZIO_TYPE_READ:
                flags = READ;
                break;

        default:
                zio->io_error = ENOTSUP;
                return ZIO_PIPELINE_CONTINUE;
        }

        error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
            zio->io_size, zio->io_offset, flags);
        if (error) {
                zio->io_error = error;
                return ZIO_PIPELINE_CONTINUE;
        }

        return ZIO_PIPELINE_STOP;
}

static void
vdev_disk_io_done(zio_t *zio)
{
        /*
         * If the device returned EIO, we revalidate the media.  If it is
         * determined the media has changed this triggers the asynchronous
         * removal of the device from the configuration.
         */
        if (zio->io_error == EIO) {
                vdev_t *v = zio->io_vd;
                vdev_disk_t *vd = v->vdev_tsd;

                if (check_disk_change(vd->vd_bdev)) {
                        vdev_bdev_invalidate(vd->vd_bdev);
                        v->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                }
        }
}

static void
vdev_disk_hold(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* We must have a pathname, and it must be absolute. */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
                return;

        /*
         * Only prefetch path and devid info if the device has
         * never been opened.
         */
        if (vd->vdev_tsd != NULL)
                return;

        /* XXX: Implement me as a vnode lookup for the device */
        vd->vdev_name_vp = NULL;
        vd->vdev_devid_vp = NULL;
}

static void
vdev_disk_rele(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /* XXX: Implement me as a vnode rele for the device */
}

vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,                   /* no state change callback */
        vdev_disk_hold,
        vdev_disk_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};

/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
        struct block_device *bdev;
        vdev_label_t *label;
        uint64_t s, size;
        int i;

        bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
        if (IS_ERR(bdev))
                return -PTR_ERR(bdev);

        s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
        if (s == 0) {
                vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
                return EIO;
        }

        size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
        label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);

        for (i = 0; i < VDEV_LABELS; i++) {
                uint64_t offset, state, txg = 0;

                /* read vdev label */
                offset = vdev_label_offset(size, i, 0);
                if (vdev_disk_physio(bdev, (caddr_t)label,
                    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state >= POOL_STATE_DESTROYED) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        vmem_free(label, sizeof(vdev_label_t));
        vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

        return 0;
}