 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
 */
#include <sys/zfs_context.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/sunldi.h>
/*
 * Virtual device vector for disks.
 */
typedef struct dio_request {
	struct completion	dr_comp;	/* Completion for sync IO */
	atomic_t		dr_ref;		/* References */
	zio_t			*dr_zio;	/* Parent ZIO */
	int			dr_rw;		/* Read/Write */
	int			dr_error;	/* Bio error */
	int			dr_bio_count;	/* Count of bio's */
	struct bio		*dr_bio[0];	/* Attached bio's */
} dio_request_t;
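/*
 * Note that dr_bio[0] is a zero-length trailing array: each request is
 * allocated with room for 'dr_bio_count' bio pointers appended to the
 * struct (see the sizeof(dio_request_t) + sizeof(struct bio *) *
 * bio_count allocation in vdev_disk_dio_alloc() below).
 */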
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
	fmode_t mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if (smode & FREAD)
		mode |= FMODE_READ;

	if (smode & FWRITE)
		mode |= FMODE_WRITE;

	return mode;
}
#else
static int
vdev_bdev_mode(int smode)
{
	int mode = 0;

	ASSERT3S(smode & (FREAD | FWRITE), !=, 0);

	if ((smode & FREAD) && !(smode & FWRITE))
		mode = MS_RDONLY;

	return mode;
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
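/*
 * The HAVE_OPEN_BDEV_EXCLUSIVE guard above is presumably set by the
 * build-time configure checks: newer kernels (2.6.28+) open block
 * devices with open_bdev_exclusive() and fmode_t flags, while older
 * kernels use the int-based mode convention, hence the two variants.
 */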
static uint64_t
bdev_capacity(struct block_device *bdev)
{
	struct hd_struct *part = bdev->bd_part;

	/* The partition capacity referenced by the block device */
	if (part)
		return part->nr_sects;

	/* Otherwise assume the full device capacity */
	return get_capacity(bdev->bd_disk);
}
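/*
 * Both nr_sects and get_capacity() count 512-byte sectors, so callers
 * that multiply this value by the logical block size (as vdev_disk_open()
 * does below) implicitly assume 512-byte logical blocks; a device with
 * larger logical blocks would have its capacity overstated here.
 */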
static void
vdev_disk_error(zio_t *zio)
{
	printk("ZFS: zio error=%d type=%d offset=%llu "
	    "size=%llu flags=%x\n", zio->io_error, zio->io_type,
	    (u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
	    zio->io_flags);
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *ashift)
{
	struct block_device *bdev;
	vdev_disk_t *vd;
	int mode, block_size;

	/* Must have a pathname and it must be absolute. */
	if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
		v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return EINVAL;
	}

	vd = kmem_zalloc(sizeof(vdev_disk_t), KM_SLEEP);
	/*
	 * Devices are always opened by the path provided at configuration
	 * time.  This means that if the provided path is a udev by-id path
	 * then drives may be recabled without an issue.  If the provided
	 * path is a udev by-path path then the physical location information
	 * will be preserved.  This can be critical for more complicated
	 * configurations where drives are located in specific physical
	 * locations to maximize the system's tolerance to component failure.
	 * Alternately you can provide your own udev rule to flexibly map
	 * the drives as you see fit.  It is not advised that you use the
	 * /dev/[hd]d devices which may be reordered due to probing order.
	 * Devices in the wrong locations will be detected by the higher
	 * level vdev validation.
	 */
	mode = spa_mode(v->vdev_spa);
	bdev = vdev_bdev_open(v->vdev_path, vdev_bdev_mode(mode), vd);
	if (IS_ERR(bdev)) {
		kmem_free(vd, sizeof(vdev_disk_t));
		return -PTR_ERR(bdev);
	}
	v->vdev_tsd = vd;
	vd->vd_bdev = bdev;
	block_size = vdev_bdev_block_size(bdev);
	/*
	 * We think the wholedisk property should always be set when this
	 * function is called.  ASSERT here so if any legitimate cases exist
	 * where it's not set, we'll find them during debugging.  If we never
	 * hit the ASSERT, this and the following conditional statement can be
	 * safely removed.
	 */
	ASSERT3S(v->vdev_wholedisk, !=, -1ULL);
	/*
	 * The wholedisk property was initialized to -1 in vdev_alloc() if it
	 * was unspecified.  In that case, check if this is a whole device.
	 * When bdev->bd_contains == bdev we have a whole device and not simply
	 * a partition.
	 */
	if (v->vdev_wholedisk == -1ULL)
		v->vdev_wholedisk = (bdev->bd_contains == bdev);
	/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
	v->vdev_nowritecache = B_FALSE;
	/* Physical volume size in bytes */
	*psize = bdev_capacity(bdev) * block_size;

	/* Based on the minimum sector size set the block size */
	*ashift = highbit(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;

	return 0;
}
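/*
 * For example, with a 512-byte block_size and SPA_MINBLOCKSIZE of 512,
 * highbit(512) returns 10 (bit positions are counted from 1), so the
 * computed ashift is 9 and 1 << 9 == 512 recovers the sector size.
 * A 4096-byte block_size would yield an ashift of 12.
 */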
static void
vdev_disk_close(vdev_t *v)
{
	vdev_disk_t *vd = v->vdev_tsd;

	if (vd == NULL)
		return;

	if (vd->vd_bdev != NULL)
		vdev_bdev_close(vd->vd_bdev,
		    vdev_bdev_mode(spa_mode(v->vdev_spa)));

	kmem_free(vd, sizeof(vdev_disk_t));
	v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
	dio_request_t *dr;
	int i;

	dr = kmem_zalloc(sizeof(dio_request_t) +
	    sizeof(struct bio *) * bio_count, KM_SLEEP);
	if (dr) {
		init_completion(&dr->dr_comp);
		atomic_set(&dr->dr_ref, 0);
		dr->dr_bio_count = bio_count;
		dr->dr_error = 0;

		for (i = 0; i < dr->dr_bio_count; i++)
			dr->dr_bio[i] = NULL;
	}

	return dr;
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
	int i;

	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			bio_put(dr->dr_bio[i]);

	kmem_free(dr, sizeof(dio_request_t) +
	    sizeof(struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
	atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
	int rc = atomic_dec_return(&dr->dr_ref);

	/*
	 * Free the dio_request when the last reference is dropped and
	 * ensure zio_interrupt is called only once with the correct zio
	 */
	if (rc == 0) {
		zio_t *zio = dr->dr_zio;
		int error = dr->dr_error;

		vdev_disk_dio_free(dr);

		if (zio) {
			zio->io_error = error;
			ASSERT3S(zio->io_error, >=, 0);
			if (zio->io_error)
				vdev_disk_error(zio);
			zio_interrupt(zio);
		}
	}

	return rc;
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, size, error)
{
	dio_request_t *dr = bio->bi_private;
	int rc;

	/* Fatal error but print some useful debugging before asserting */
	if (dr == NULL)
		PANIC("dr == NULL, bio->bi_private == NULL\n"
		    "bi_next: %p, bi_flags: %lx, bi_rw: %lu, bi_vcnt: %d\n"
		    "bi_idx: %d, bi_size: %d, bi_end_io: %p, bi_cnt: %d\n",
		    bio->bi_next, bio->bi_flags, bio->bi_rw, bio->bi_vcnt,
		    bio->bi_idx, bio->bi_size, bio->bi_end_io,
		    atomic_read(&bio->bi_cnt));
#ifndef HAVE_2ARGS_BIO_END_IO_T
	if (bio->bi_size)
		return 1;
#endif /* HAVE_2ARGS_BIO_END_IO_T */
	if (error == 0 && !test_bit(BIO_UPTODATE, &bio->bi_flags))
		error = -EIO;

	if (dr->dr_error == 0)
		dr->dr_error = -error;
	/* Drop reference acquired by __vdev_disk_physio */
	rc = vdev_disk_dio_put(dr);

	/* Wake up the synchronous waiter; this is the last outstanding bio */
	if ((rc == 1) && (dr->dr_rw & (1 << DIO_RW_SYNCIO)))
		complete(&dr->dr_comp);

	BIO_END_IO_RETURN(0);
}
static inline unsigned long
bio_nr_pages(void *bio_ptr, unsigned int bio_size)
{
	return ((((unsigned long)bio_ptr + bio_size + PAGE_SIZE - 1) >>
	    PAGE_SHIFT) - ((unsigned long)bio_ptr >> PAGE_SHIFT));
}
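/*
 * This rounds the buffer's end address up to a page boundary and
 * subtracts the page index of its start.  For example, with 4 KiB
 * pages, a buffer starting 512 bytes into a page with bio_size 8192
 * touches pages 0, 1, and 2 of that range, so the function returns 3.
 */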
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
	unsigned int offset, size, i;
	struct page *page;

	offset = offset_in_page(bio_ptr);
	for (i = 0; i < bio->bi_max_vecs; i++) {
		size = PAGE_SIZE - offset;

		if (size > bio_size)
			size = bio_size;

		if (kmem_virt(bio_ptr))
			page = vmalloc_to_page(bio_ptr);
		else
			page = virt_to_page(bio_ptr);

		if (bio_add_page(bio, page, size, offset) != size)
			break;

		bio_ptr  += size;
		bio_size -= size;
		offset = 0;
	}

	return bio_size;
}
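/*
 * bio_map() returns the number of bytes it could not attach to the bio,
 * either because the buffer was fully mapped (0) or because the bio ran
 * out of vecs or the request queue rejected a page; the caller uses the
 * remainder as the size of the next bio it builds.
 */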
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio, caddr_t kbuf_ptr,
    size_t kbuf_size, uint64_t kbuf_offset, int flags)
{
	dio_request_t *dr;
	caddr_t bio_ptr;
	uint64_t bio_offset;
	int bio_size, bio_count = 16;
	int i = 0, error = 0, block_size;
retry:
	dr = vdev_disk_dio_alloc(bio_count);
	if (dr == NULL)
		return ENOMEM;
	if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
		bio_set_flags_failfast(bdev, &flags);
	dr->dr_zio = zio;
	dr->dr_rw = flags;
	block_size = vdev_bdev_block_size(bdev);
	/*
	 * When the IO size exceeds the maximum bio size for the request
	 * queue we are forced to break the IO in multiple bio's and wait
	 * for them all to complete.  Ideally, all pool users will set
	 * their volume block size to match the maximum request size and
	 * the common case will be one bio per vdev IO request.
	 */
	bio_ptr = kbuf_ptr;
	bio_offset = kbuf_offset;
	bio_size = kbuf_size;
	for (i = 0; i <= dr->dr_bio_count; i++) {
		/* Finished constructing bio's for given buffer */
		if (bio_size <= 0)
			break;
		/*
		 * By default only 'bio_count' bio's per dio are allowed.
		 * However, if we find ourselves in a situation where more
		 * are needed we allocate a larger dio and warn the user.
		 */
		if (dr->dr_bio_count == i) {
			vdev_disk_dio_free(dr);
			bio_count *= 2;
			printk("WARNING: Resized bio's/dio to %d\n", bio_count);
			goto retry;
		}
		dr->dr_bio[i] = bio_alloc(GFP_NOIO,
		    bio_nr_pages(bio_ptr, bio_size));
		if (dr->dr_bio[i] == NULL) {
			vdev_disk_dio_free(dr);
			return ENOMEM;
		}
		/* Matching put called by vdev_disk_physio_completion */
		vdev_disk_dio_get(dr);

		dr->dr_bio[i]->bi_bdev = bdev;
		dr->dr_bio[i]->bi_sector = bio_offset / block_size;
		dr->dr_bio[i]->bi_rw = dr->dr_rw;
		dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
		dr->dr_bio[i]->bi_private = dr;
		/* Remaining size is returned to become the new size */
		bio_size = bio_map(dr->dr_bio[i], bio_ptr, bio_size);

		/* Advance in buffer and construct another bio if needed */
		bio_ptr += dr->dr_bio[i]->bi_size;
		bio_offset += dr->dr_bio[i]->bi_size;
	}
	/* Extra reference to protect dio_request during submit_bio */
	vdev_disk_dio_get(dr);

	/* Submit all bio's associated with this dio */
	for (i = 0; i < dr->dr_bio_count; i++)
		if (dr->dr_bio[i])
			submit_bio(dr->dr_rw, dr->dr_bio[i]);
	/*
	 * On synchronous blocking requests we wait for all bio completion
	 * callbacks to run.  We will be woken when the last callback runs
	 * for this dio.  We are responsible for putting the last dio_request
	 * reference, which will in turn put back the last bio references.
	 * The only synchronous consumer is vdev_disk_read_rootlabel(); all
	 * other IO originating from vdev_disk_io_start() is asynchronous.
	 */
	if (dr->dr_rw & (1 << DIO_RW_SYNCIO)) {
		wait_for_completion(&dr->dr_comp);
		error = dr->dr_error;
		ASSERT3S(atomic_read(&dr->dr_ref), ==, 1);
	}

	(void)vdev_disk_dio_put(dr);

	return error;
}
int
vdev_disk_physio(struct block_device *bdev, caddr_t kbuf,
    size_t size, uint64_t offset, int flags)
{
	bio_set_flags_failfast(bdev, &flags);
	return __vdev_disk_physio(bdev, NULL, kbuf, size, offset, flags);
}
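/*
 * Since no zio is passed here, the zio-based failfast check inside
 * __vdev_disk_physio() is skipped, which is presumably why the failfast
 * flags are applied in this wrapper instead.
 */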
/* 2.6.24 API change */
#ifdef HAVE_BIO_EMPTY_BARRIER
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, size, rc)
{
	zio_t *zio = bio->bi_private;

	zio->io_error = -rc;
	if (rc && (rc == -EOPNOTSUPP))
		zio->io_vd->vdev_nowritecache = B_TRUE;

	bio_put(bio);
	ASSERT3S(zio->io_error, >=, 0);
	if (zio->io_error)
		vdev_disk_error(zio);
	zio_interrupt(zio);

	BIO_END_IO_RETURN(0);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	struct request_queue *q;
	struct bio *bio;

	q = bdev_get_queue(bdev);
	if (!q)
		return ENXIO;

	bio = bio_alloc(GFP_KERNEL, 0);
	if (!bio)
		return ENOMEM;

	bio->bi_end_io = vdev_disk_io_flush_completion;
	bio->bi_private = zio;
	bio->bi_bdev = bdev;
	submit_bio(WRITE_BARRIER, bio);

	return 0;
}
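/*
 * Submitting a zero-vec bio with WRITE_BARRIER asks the block layer for
 * an "empty barrier", which on these kernels drains the queue and
 * flushes the device write cache; drivers that cannot honor it complete
 * the bio with -EOPNOTSUPP, handled above by disabling cache flushes.
 */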
#else
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
	return ENOTSUP;
}
#endif /* HAVE_BIO_EMPTY_BARRIER */
static int
vdev_disk_io_start(zio_t *zio)
{
	vdev_t *v = zio->io_vd;
	vdev_disk_t *vd = v->vdev_tsd;
	int flags, error;

	switch (zio->io_type) {
	case ZIO_TYPE_IOCTL:
		if (!vdev_readable(v)) {
			zio->io_error = ENXIO;
			return ZIO_PIPELINE_CONTINUE;
		}
		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			if (zfs_nocacheflush)
				break;
			if (v->vdev_nowritecache) {
				zio->io_error = ENOTSUP;
				break;
			}

			error = vdev_disk_io_flush(vd->vd_bdev, zio);
			if (error == 0)
				return ZIO_PIPELINE_STOP;
			zio->io_error = error;
			if (error == ENOTSUP)
				v->vdev_nowritecache = B_TRUE;

			break;
		default:
			zio->io_error = ENOTSUP;
		}

		return ZIO_PIPELINE_CONTINUE;
	case ZIO_TYPE_WRITE:
		flags = WRITE;
		break;

	case ZIO_TYPE_READ:
		flags = READ;
		break;

	default:
		zio->io_error = ENOTSUP;
		return ZIO_PIPELINE_CONTINUE;
	}
	error = __vdev_disk_physio(vd->vd_bdev, zio, zio->io_data,
	    zio->io_size, zio->io_offset, flags);
	if (error) {
		zio->io_error = error;
		return ZIO_PIPELINE_CONTINUE;
	}

	return ZIO_PIPELINE_STOP;
}
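/*
 * ZIO_PIPELINE_STOP here means the zio is not completed inline: the
 * pipeline resumes from the bio completion path (vdev_disk_dio_put()
 * calling zio_interrupt()) once the submitted bio's finish, whereas
 * ZIO_PIPELINE_CONTINUE reports an immediate, synchronous outcome.
 */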
static void
vdev_disk_io_done(zio_t *zio)
{
	/*
	 * If the device returned EIO, we revalidate the media.  If it is
	 * determined the media has changed this triggers the asynchronous
	 * removal of the device from the configuration.
	 */
	if (zio->io_error == EIO) {
		vdev_t *v = zio->io_vd;
		vdev_disk_t *vd = v->vdev_tsd;

		if (check_disk_change(vd->vd_bdev)) {
			vdev_bdev_invalidate(vd->vd_bdev);
			v->vdev_remove_wanted = B_TRUE;
			spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
		}
	}
}
static void
vdev_disk_hold(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
578 if (vd
->vdev_path
== NULL
|| vd
->vdev_path
[0] != '/')
	/*
	 * Only prefetch path and devid info if the device has
	 * never been opened.
	 */
	if (vd->vdev_tsd != NULL)
		return;
	/* XXX: Implement me as a vnode lookup for the device */
	vd->vdev_name_vp = NULL;
	vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
	ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

	/* XXX: Implement me as a vnode rele for the device */
}
vdev_ops_t vdev_disk_ops = {
	vdev_disk_open,
	vdev_disk_close,
	vdev_default_asize,
	vdev_disk_io_start,
	vdev_disk_io_done,
	NULL,			/* vdev_op_state_change */
	vdev_disk_hold,
	vdev_disk_rele,
	VDEV_TYPE_DISK,		/* name of this vdev type */
	B_TRUE			/* leaf vdev */
};
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
	struct block_device *bdev;
	vdev_label_t *label;
	uint64_t s, size;
	int i;

	bdev = vdev_bdev_open(devpath, vdev_bdev_mode(FREAD), NULL);
	if (IS_ERR(bdev))
		return -PTR_ERR(bdev);
	s = bdev_capacity(bdev) * vdev_bdev_block_size(bdev);
	if (s == 0) {
		vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));
		return EIO;
	}
	size = P2ALIGN_TYPED(s, sizeof(vdev_label_t), uint64_t);
	label = vmem_alloc(sizeof(vdev_label_t), KM_SLEEP);
	for (i = 0; i < VDEV_LABELS; i++) {
		uint64_t offset, state, txg = 0;

		/* read vdev label */
		offset = vdev_label_offset(size, i, 0);
		if (vdev_disk_physio(bdev, (caddr_t)label,
		    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, READ_SYNC) != 0)
			continue;
		if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
		    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
			*config = NULL;
			continue;
		}
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
		    &state) != 0 || state >= POOL_STATE_DESTROYED) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}
		if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
		    &txg) != 0 || txg == 0) {
			nvlist_free(*config);
			*config = NULL;
			continue;
		}
	}
	vmem_free(label, sizeof(vdev_label_t));
	vdev_bdev_close(bdev, vdev_bdev_mode(FREAD));

	return (*config == NULL ? EIDRM : 0);
}