git.proxmox.com Git - mirror_zfs-debian.git/blob - module/zfs/vdev_disk.c
Use stored whole_disk property when opening a vdev
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
23 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
24 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
25 * LLNL-CODE-403049.
26 */
27
28 #include <sys/zfs_context.h>
29 #include <sys/spa.h>
30 #include <sys/vdev_disk.h>
31 #include <sys/vdev_impl.h>
32 #include <sys/fs/zfs.h>
33 #include <sys/zio.h>
34 #include <sys/sunldi.h>
35
36 /*
37 * Virtual device vector for disks.
38 */
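/*
 * A dio_request tracks the one or more bio's needed to service a single
 * zio. Each attached bio holds a reference on the request; the request is
 * freed, and the zio (if any) completed, when the last reference is dropped.
 */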
39 typedef struct dio_request {
40 struct completion dr_comp; /* Completion for sync IO */
41 atomic_t dr_ref; /* References */
42 zio_t *dr_zio; /* Parent ZIO */
43 int dr_rw; /* Read/Write */
44 int dr_error; /* Bio error */
45 int dr_bio_count; /* Count of bio's */
46 struct bio *dr_bio[0]; /* Attached bio's */
47 } dio_request_t;
48
49
50 #ifdef HAVE_OPEN_BDEV_EXCLUSIVE
51 static fmode_t
52 vdev_bdev_mode(int smode)
53 {
54 fmode_t mode = 0;
55
56 ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
57
58 if (smode & FREAD)
59 mode |= FMODE_READ;
60
61 if (smode & FWRITE)
62 mode |= FMODE_WRITE;
63
64 return mode;
65 }
66 #else
67 static int
68 vdev_bdev_mode(int smode)
69 {
70 int mode = 0;
71
72 ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
73
74 if ((smode & FREAD) && !(smode & FWRITE))
75 mode = MS_RDONLY;
76
77 return mode;
78 }
79 #endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
80
81 static uint64_t
82 bdev_capacity(struct block_device *bdev)
83 {
84 struct hd_struct *part = bdev->bd_part;
85
86 /* The partition capacity referenced by the block device */
87 if (part)
88 return part->nr_sects;
89
90 /* Otherwise assume the full device capacity */
91 return get_capacity(bdev->bd_disk);
92 }
93
94 static int
95 vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
96 {
97 struct block_device *bdev;
98 vdev_disk_t *vd;
99 int mode, block_size;
100
101 /* Must have a pathname and it must be absolute. */
102 if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
103 v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
104 return EINVAL;
105 }
106
107 vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
108 if (vd == NULL)
109 return ENOMEM;
110
111 /*
112 * Devices are always opened by the path provided at configuration
113 * time. This means that if the provided path is a udev by-id path
114 * then drives may be recabled without an issue. If the provided
115 * path is a udev by-path path then the physical location information
116 * will be preserved. This can be critical for more complicated
117 * configurations where drives are located in specific physical
118 * locations to maximize the system's tolerance to component failure.
119 * Alternatively, you can provide your own udev rule to flexibly map
120 * the drives as you see fit. It is not advised that you use the
121 * /dev/[hs]d devices, which may be reordered due to probing order.
122 * Devices in the wrong locations will be detected by the higher
123 * level vdev validation.
124 */
125 mode = spa_mode(v->vdev_spa);
126 bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
127 if (IS_ERR(bdev)) {
128 kmem_free(vd, sizeof(vdev_disk_t));
129 return -PTR_ERR(bdev);
130 }
131
132 v->vdev_tsd = vd;
133 vd->vd_bdev = bdev;
134 block_size = vdev_bdev_block_size(bdev);
135
136 /* We think the wholedisk property should always be set when this
137 * function is called. ASSERT here so if any legitimate cases exist
138 * where it's not set, we'll find them during debugging. If we never
139 * hit the ASSERT, this and the following conditional statement can be
140 * removed. */
141 ASSERT3S(v->vdev_wholedisk, !=, -1ULL);
142
143 /* The wholedisk property was initialized to -1 in vdev_alloc() if it
144 * was unspecified. In that case, check if this is a whole device.
145 * When bdev->bd_contains == bdev we have a whole device and not simply
146 * a partition. */
147 if (v->vdev_wholedisk == -1ULL)
148 v->vdev_wholedisk = (bdev->bd_contains == bdev);
149
150 /* Clear the nowritecache bit; this causes vdev_reopen() to try again. */
151 v->vdev_nowritecache = B_FALSE;
152
153 /* Physical volume size in bytes */
154 *psize = bdev_capacity(bdev) * block_size;
155
156 /* Based on the minimum sector size, set the block size shift (ashift) */
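/* e.g. 512-byte sectors give ashift = 9; 4096-byte sectors give ashift = 12 */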
157 *ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
158
159 return 0;
160 }
161
162 static void
163 vdev_disk_close(vdev_t *v)
164 {
165 vdev_disk_t *vd = v->vdev_tsd;
166
167 if (vd == NULL)
168 return;
169
170 if (vd->vd_bdev != NULL)
171 vdev_bdev_close(vd->vd_bdev,
172 vdev_bdev_mode(spa_mode(v->vdev_spa)));
173
174 kmem_free(vd, sizeof(vdev_disk_t));
175 v->vdev_tsd = NULL;
176 }
177
178 static dio_request_t *
179 vdev_disk_dio_alloc(int bio_count)
180 {
181 dio_request_t *dr;
182 int i;
183
184 dr = kmem_zalloc(sizeof(dio_request_t) +
185 sizeof(struct bio *) * bio_count, KM_SLEEP);
186 if (dr) {
187 init_completion(&dr->dr_comp);
188 atomic_set(&dr->dr_ref, 0);
189 dr->dr_bio_count = bio_count;
190 dr->dr_error = 0;
191
192 for (i = 0; i < dr->dr_bio_count; i++)
193 dr->dr_bio[i] = NULL;
194 }
195
196 return dr;
197 }
198
199 static void
200 vdev_disk_dio_free(dio_request_t *dr)
201 {
202 int i;
203
204 for (i = 0; i < dr->dr_bio_count; i++)
205 if (dr->dr_bio[i])
206 bio_put(dr->dr_bio[i]);
207
208 kmem_free(dr, sizeof(dio_request_t) +
209 sizeof(struct bio *) * dr->dr_bio_count);
210 }
211
212 static void
213 vdev_disk_dio_get(dio_request_t *dr)
214 {
215 atomic_inc(&dr->dr_ref);
216 }
217
218 static int
219 vdev_disk_dio_put(dio_request_t *dr)
220 {
221 int rc = atomic_dec_return(&dr->dr_ref);
222
223 /*
224 * Free the dio_request when the last reference is dropped and
225 * ensure zio_interrupt() is called only once with the correct zio
226 */
227 if (rc == 0) {
228 zio_t *zio = dr->dr_zio;
229 int error = dr->dr_error;
230
231 vdev_disk_dio_free(dr);
232
233 if (zio) {
234 zio->io_error = error;
235 zio_interrupt(zio);
236 }
237 }
238
239 return rc;
240 }
241
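/*
 * Per-bio completion callback. The BIO_END_IO_PROTO/BIO_END_IO_RETURN
 * macros paper over differences in the bio end_io callback signature
 * between kernel versions (see the HAVE_2ARGS_BIO_END_IO_T check below).
 */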
242 BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
243 {
244 dio_request_t *dr = bio->bi_private;
245 int rc;
246
247 /* Fatal error but print some useful debugging before asserting */
248 if (dr == NULL)
249 PANIC("dr == NULL, bio->bi_private == NULL\n"
250 "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
251 "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
252 bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
253 bio->bi_idx, bio->bi_size, bio->bi_end_io,
254 atomic_read(&bio->bi_cnt));
255
256 #ifndef HAVE_2ARGS_BIO_END_IO_T
257 if (bio->bi_size)
258 return 1;
259 #endif /* HAVE_2ARGS_BIO_END_IO_T */
260
261 if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
262 error = EIO;
263
264 if (dr->dr_error == 0)
265 dr->dr_error = error;
266
267 /* Drop reference acquired by __vdev_disk_physio */
268 rc = vdev_disk_dio_put(dr);
269
270 /* Wake up the synchronous waiter if this is the last outstanding bio */
271 if ((rc == 1) && (dr->dr_rw & (1 << DIO_RW_SYNCIO)))
272 complete(&dr->dr_comp);
273
274 BIO_END_IO_RETURN(0);
275 }
276
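/*
 * Number of physical pages spanned by the buffer [bio_ptr, bio_ptr +
 * bio_size); used below to size the bio_vec array when allocating a bio.
 */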
277 static inline unsigned long
278 bio_nr_pages(void *bio_ptr, unsigned int bio_size)
279 {
280 return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
281 PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
282 }
283
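/*
 * Add as much of the buffer as possible to the bio, page by page. Both
 * kmalloc()'d and vmalloc()'d buffers are handled. Returns the number of
 * bytes which could not be mapped, 0 if the entire buffer was mapped.
 */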
284 static unsigned int
285 bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
286 {
287 unsigned int offset, size, i;
288 struct page *page;
289
290 offset = offset_in_page(bio_ptr);
291 for (i = 0; i < bio->bi_max_vecs; i++) {
292 size = PAGE_SIZE - offset;
293
294 if (bio_size <= 0)
295 break;
296
297 if (size > bio_size)
298 size = bio_size;
299
300 if (kmem_virt(bio_ptr))
301 page = vmalloc_to_page(bio_ptr);
302 else
303 page = virt_to_page(bio_ptr);
304
305 if (bio_add_page(bio, page, size, offset) != size)
306 break;
307
308 bio_ptr += size;
309 bio_size -= size;
310 offset = 0;
311 }
312
313 return bio_size;
314 }
315
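/*
 * Read or write a kernel buffer to a block device. The buffer is split
 * across as many bio's as required, they are all submitted, and when the
 * DIO_RW_SYNCIO flag is set the caller blocks until every bio completes.
 */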
316 static int
317 __vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
318 size_t kbuf_size, uint64_t kbuf_offset, int flags)
319 {
320 dio_request_t *dr;
321 caddr_t bio_ptr;
322 uint64_t bio_offset;
323 int bio_size, bio_count = 16;
324 int i = 0, error = 0, block_size;
325
326 retry:
327 dr = vdev_disk_dio_alloc(bio_count);
328 if (dr == NULL)
329 return ENOMEM;
330
331 dr->dr_zio = zio;
332 dr->dr_rw = flags;
333 block_size = vdev_bdev_block_size(bdev);
334
335 #ifdef BIO_RW_FAILFAST
336 if (flags & (1 << BIO_RW_FAILFAST))
337 dr->dr_rw |= 1 << BIO_RW_FAILFAST;
338 #endif /* BIO_RW_FAILFAST */
339
340 /*
341 * When the IO size exceeds the maximum bio size for the request
342 * queue, we are forced to break the IO into multiple bio's and wait
343 * for them all to complete. Ideally, all pool users will set
344 * their volume block size to match the maximum request size and
345 * the common case will be one bio per vdev IO request.
346 */
347 bio_ptr = kbuf_ptr;
348 bio_offset = kbuf_offset;
349 bio_size = kbuf_size;
350 for (i = 0; i <= dr->dr_bio_count; i++) {
351
352 /* Finished constructing bio's for given buffer */
353 if (bio_size <= 0)
354 break;
355
356 /*
357 * By default only 'bio_count' bio's per dio are allowed.
358 * However, if we find ourselves in a situation where more
359 * are needed we allocate a larger dio and warn the user.
360 */
361 if (dr->dr_bio_count == i) {
362 vdev_disk_dio_free(dr);
363 bio_count *= 2;
364 printk("WARNING: Resized bio's/dio to %d\n",bio_count);
365 goto retry;
366 }
367
368 dr->dr_bio[i] = bio_alloc(GFP_NOIO,
369 bio_nr_pages(bio_ptr, bio_size));
370 if (dr->dr_bio[i] == NULL) {
371 vdev_disk_dio_free(dr);
372 return ENOMEM;
373 }
374
375 /* Matching put called by vdev_disk_physio_completion */
376 vdev_disk_dio_get(dr);
377
378 dr->dr_bio[i]->bi_bdev = bdev;
379 dr->dr_bio[i]->bi_sector = bio_offset / block_size;
380 dr->dr_bio[i]->bi_rw = dr->dr_rw;
381 dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
382 dr->dr_bio[i]->bi_private = dr;
383
384 /* The unmapped remainder is returned and becomes the new bio_size */
385 bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);
386
387 /* Advance in buffer and construct another bio if needed */
388 bio_ptr += dr->dr_bio[i]->bi_size;
389 bio_offset += dr->dr_bio[i]->bi_size;
390 }
391
392 /* Extra reference to protect dio_request during submit_bio */
393 vdev_disk_dio_get(dr);
394
395 /* Submit all bio's associated with this dio */
396 for (i = 0; i < dr->dr_bio_count; i++)
397 if (dr->dr_bio[i])
398 submit_bio(dr->dr_rw, dr->dr_bio[i]);
399
400 /*
401 * On synchronous blocking requests we wait for all bio completion
402 * callbacks to run. We will be woken when the last callback runs
403 * for this dio. We are responsible for putting the last dio_request
404 * reference, which will in turn put back the last bio references.
405 * The only synchronous consumer is vdev_disk_read_rootlabel(); all
406 * other IO originating from vdev_disk_io_start() is asynchronous.
407 */
408 if (dr->dr_rw & (1 << DIO_RW_SYNCIO)) {
409 wait_for_completion(&dr->dr_comp);
410 error = dr->dr_error;
411 ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
412 }
413
414 (void)vdev_disk_dio_put(dr);
415
416 return error;
417 }
418
419 int
420 vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
421 size_t size, uint64_t offset, int flags)
422 {
423 return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
424 }
425
426 /* 2.6.24 API change */
427 #ifdef HAVE_BIO_EMPTY_BARRIER
428 BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
429 {
430 zio_t *zio = bio->bi_private;
431
432 zio->io_error = -rc;
433 if (rc && (rc == -EOPNOTSUPP))
434 zio->io_vd->vdev_nowritecache = B_TRUE;
435
436 bio_put(bio);
437 zio_interrupt(zio);
438
439 BIO_END_IO_RETURN(0);
440 }
441
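/*
 * Flush the device's volatile write cache by submitting an empty barrier
 * bio. The completion callback above records EOPNOTSUPP by setting
 * vdev_nowritecache so further flushes are not attempted.
 */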
442 static int
443 vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
444 {
445 struct request_queue *q;
446 struct bio *bio;
447
448 q = bdev_get_queue(bdev);
449 if (!q)
450 return ENXIO;
451
452 bio = bio_alloc(GFP_KERNEL, 0);
453 if (!bio)
454 return ENOMEM;
455
456 bio->bi_end_io = vdev_disk_io_flush_completion;
457 bio->bi_private = zio;
458 bio->bi_bdev = bdev;
459 submit_bio(WRITE_BARRIER, bio);
460
461 return 0;
462 }
463 #else
464 static int
465 vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
466 {
467 return ENOTSUP;
468 }
469 #endif /* HAVE_BIO_EMPTY_BARRIER */
470
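/*
 * Entry point for starting disk I/O on behalf of a zio. Reads and writes
 * are passed to __vdev_disk_physio(); DKIOCFLUSHWRITECACHE ioctls trigger
 * an asynchronous write cache flush via vdev_disk_io_flush().
 */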
471 static int
472 vdev_disk_io_start(zio_t *zio)
473 {
474 vdev_t *v = zio->io_vd;
475 vdev_disk_t *vd = v->vdev_tsd;
476 int flags, error;
477
478 switch (zio->io_type) {
479 case ZIO_TYPE_IOCTL:
480
481 if (!vdev_readable(v)) {
482 zio->io_error = ENXIO;
483 return ZIO_PIPELINE_CONTINUE;
484 }
485
486 switch (zio->io_cmd) {
487 case DKIOCFLUSHWRITECACHE:
488
489 if (zfs_nocacheflush)
490 break;
491
492 if (v->vdev_nowritecache) {
493 zio->io_error = ENOTSUP;
494 break;
495 }
496
497 error = vdev_disk_io_flush(vd->vd_bdev, zio);
498 if (error == 0)
499 return ZIO_PIPELINE_STOP;
500
501 zio->io_error = error;
502 if (error == ENOTSUP)
503 v->vdev_nowritecache = B_TRUE;
504
505 break;
506
507 default:
508 zio->io_error = ENOTSUP;
509 }
510
511 return ZIO_PIPELINE_CONTINUE;
512
513 case ZIO_TYPE_WRITE:
514 flags = WRITE;
515 break;
516
517 case ZIO_TYPE_READ:
518 flags = READ;
519 break;
520
521 default:
522 zio->io_error = ENOTSUP;
523 return ZIO_PIPELINE_CONTINUE;
524 }
525
526 #ifdef BIO_RW_FAILFAST
527 if (zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD))
528 flags |= (1 << BIO_RW_FAILFAST);
529 #endif /* BIO_RW_FAILFAST */
530
531 error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
532 zio->io_size, zio->io_offset, flags);
533 if (error) {
534 zio->io_error = error;
535 return ZIO_PIPELINE_CONTINUE;
536 }
537
538 return ZIO_PIPELINE_STOP;
539 }
540
541 static void
542 vdev_disk_io_done(zio_t *zio)
543 {
544 /*
545 * If the device returned EIO, we revalidate the media. If it is
546 * determined that the media has changed, this triggers the asynchronous
547 * removal of the device from the configuration.
548 */
549 if (zio->io_error == EIO) {
550 vdev_t *v = zio->io_vd;
551 vdev_disk_t *vd = v->vdev_tsd;
552
553 if (check_disk_change(vd->vd_bdev)) {
554 vdev_bdev_invalidate(vd->vd_bdev);
555 v->vdev_remove_wanted = B_TRUE;
556 spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
557 }
558 }
559 }
560
561 static void
562 vdev_disk_hold(vdev_t *vd)
563 {
564 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
565
566 /* We must have a pathname, and it must be absolute. */
567 if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
568 return;
569
570 /*
571 * Only prefetch path and devid info if the device has
572 * never been opened.
573 */
574 if (vd->vdev_tsd != NULL)
575 return;
576
577 /* XXX: Implement me as a vnode lookup for the device */
578 vd->vdev_name_vp = NULL;
579 vd->vdev_devid_vp = NULL;
580 }
581
582 static void
583 vdev_disk_rele(vdev_t *vd)
584 {
585 ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
586
587 /* XXX: Implement me as a vnode rele for the device */
588 }
589
590 vdev_ops_t vdev_disk_ops = {
591 vdev_disk_open, /* vdev_op_open */
592 vdev_disk_close, /* vdev_op_close */
593 vdev_default_asize, /* vdev_op_asize */
594 vdev_disk_io_start, /* vdev_op_io_start */
595 vdev_disk_io_done, /* vdev_op_io_done */
596 NULL, /* vdev_op_state_change */
597 vdev_disk_hold, /* vdev_op_hold */
598 vdev_disk_rele, /* vdev_op_rele */
599 VDEV_TYPE_DISK, /* name of this vdev type */
600 B_TRUE /* leaf vdev */
601 };
602
603 /*
604 * Given the root disk device devid or pathname, read the label from
605 * the device, and construct a configuration nvlist.
606 */
607 int
608 vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
609 {
610 struct block_device *bdev;
611 vdev_label_t *label;
612 uint64_t s, size;
613 int i;
614
615 bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
616 if (IS_ERR(bdev))
617 return -PTR_ERR(bdev);
618
619 s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
620 if (s == 0) {
621 vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
622 return EIO;
623 }
624
625 size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
626 label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);
627
628 for (i = 0; i < VDEV_LABELS; i++) {
629 uint64_t offset, state, txg = 0;
630
631 /* read vdev label */
632 offset = vdev_label_offset(size, i, 0);
633 if (vdev_disk_physio(bdev, (caddr_t)label,
634 VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
635 continue;
636
637 if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
638 sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
639 *config = NULL;
640 continue;
641 }
642
643 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
644 &state) != 0 || state >= POOL_STATE_DESTROYED) {
645 nvlist_free(*config);
646 *config = NULL;
647 continue;
648 }
649
650 if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
651 &txg) != 0 || txg == 0) {
652 nvlist_free(*config);
653 *config = NULL;
654 continue;
655 }
656
657 break;
658 }
659
660 vmem_free(label, sizeof(vdev_label_t));
661 vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
662
663 return 0;
664 }