/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
/*
 * These tunables are for performance analysis.
 */
/*
 * zfs_vdev_max_pending is the maximum number of i/os concurrently
 * pending to each device.  zfs_vdev_min_pending is the initial number
 * of i/os pending to each device (before it starts ramping up to
 * max_pending).
 */
int zfs_vdev_max_pending = 10;
int zfs_vdev_min_pending = 4;
/*
 * The deadlines are grouped into buckets based on zfs_vdev_time_shift:
 * deadline = pri + (gethrtime() >> time_shift)
 */
int zfs_vdev_time_shift = 29; /* each bucket is 0.537 seconds */
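
/*
 * Worked example (editor's sketch, not part of the original source):
 * with zfs_vdev_time_shift = 29, one bucket spans 2^29 ns =
 * 536,870,912 ns, i.e. roughly 0.537 seconds.  The uncompiled block
 * below mirrors the deadline computation performed in vdev_queue_io();
 * the function name is hypothetical.
 */
#if 0
static uint64_t
example_deadline(uint64_t timestamp_ns, uint64_t priority)
{
	/*
	 * Same-priority i/os arriving within the same ~0.537 s window
	 * share a deadline, so vdev_queue_deadline_compare() falls
	 * through to its offset comparison and they issue in LBA order.
	 */
	return ((timestamp_ns >> zfs_vdev_time_shift) + priority);
}
#endif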
/* exponential I/O issue ramp-up rate */
int zfs_vdev_ramp_rate = 2;
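
/*
 * Worked example (editor's note): vdev_queue_io() issues against a
 * pending limit of zfs_vdev_min_pending (4), while each completion in
 * vdev_queue_io_done() may issue up to zfs_vdev_ramp_rate (2) new i/os
 * against zfs_vdev_max_pending (10).  A completing i/o can thus be
 * replaced by as many as two new ones, so the in-flight count can
 * roughly double with each wave of completions (4, then up to 8, then
 * capped at 10), hence the "exponential" ramp-up.
 */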
/*
 * To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
 * For read I/Os, we also aggregate across small adjacency gaps; for writes
 * we include spans of optional I/Os to aid aggregation at the disk even when
 * they aren't able to help us aggregate at this level.
 */
int zfs_vdev_aggregation_limit = SPA_MAXBLOCKSIZE;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
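
/*
 * Worked example (editor's note): with the defaults above, a read at
 * [0K, 4K) and a read at [36K, 40K) are exactly 32K apart, so they may
 * still be merged into one 40K read that simply discards the middle.
 * Writes get no such grace (maxgap is 0 for the write tree); they only
 * bridge holes of up to 4K via spans of optional i/os, as handled in
 * vdev_queue_io_to_issue() below.
 */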
/*
 * Virtual device vector for disk I/O scheduling.
 */
int
vdev_queue_deadline_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_deadline < z2->io_deadline)
		return (-1);
	if (z1->io_deadline > z2->io_deadline)
		return (1);

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset)
		return (-1);
	if (z1->io_offset > z2->io_offset)
		return (1);

	if (z1 < z2)
		return (-1);
	if (z1 > z2)
		return (1);

	return (0);
}
void
vdev_queue_init(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	int i;

	mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vq->vq_deadline_tree, vdev_queue_deadline_compare,
	    sizeof (zio_t), offsetof(struct zio, io_deadline_node));

	avl_create(&vq->vq_read_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_write_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	avl_create(&vq->vq_pending_tree, vdev_queue_offset_compare,
	    sizeof (zio_t), offsetof(struct zio, io_offset_node));

	/*
	 * A list of buffers which can be used for aggregate I/O; this
	 * avoids the need to allocate them on demand when memory is low.
	 */
	list_create(&vq->vq_io_list, sizeof (vdev_io_t),
	    offsetof(vdev_io_t, vi_node));

	for (i = 0; i < zfs_vdev_max_pending; i++)
		list_insert_tail(&vq->vq_io_list, zio_vdev_alloc());
}
void
vdev_queue_fini(vdev_t *vd)
{
	vdev_queue_t *vq = &vd->vdev_queue;
	vdev_io_t *vi;

	avl_destroy(&vq->vq_deadline_tree);
	avl_destroy(&vq->vq_read_tree);
	avl_destroy(&vq->vq_write_tree);
	avl_destroy(&vq->vq_pending_tree);

	while ((vi = list_head(&vq->vq_io_list)) != NULL) {
		list_remove(&vq->vq_io_list, vi);
		zio_vdev_free(vi);
	}

	list_destroy(&vq->vq_io_list);

	mutex_destroy(&vq->vq_lock);
}
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
	avl_add(&vq->vq_deadline_tree, zio);
	avl_add(zio->io_vdev_tree, zio);
}
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
	avl_remove(&vq->vq_deadline_tree, zio);
	avl_remove(zio->io_vdev_tree, zio);
}
static void
vdev_queue_agg_io_done(zio_t *aio)
{
	vdev_queue_t *vq = &aio->io_vd->vdev_queue;
	vdev_io_t *vi = aio->io_data;
	zio_t *pio;

	/*
	 * For reads, copy the aggregate's data back out to each parent
	 * i/o at its offset within the aggregate.
	 */
	while ((pio = zio_walk_parents(aio)) != NULL)
		if (aio->io_type == ZIO_TYPE_READ)
			bcopy((char *)aio->io_data + (pio->io_offset -
			    aio->io_offset), pio->io_data, pio->io_size);

	/* Return the borrowed buffer to the queue's free list. */
	mutex_enter(&vq->vq_lock);
	list_insert_tail(&vq->vq_io_list, vi);
	mutex_exit(&vq->vq_lock);
}
/*
 * Compute the range spanned by two i/os, which is the endpoint of the last
 * (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
 * Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
 * thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
 */
#define	IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define	IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
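
/*
 * Worked example (editor's note): let fio be a 4K i/o at offset 0 and
 * lio a 4K i/o at offset 12K.  Then
 *
 *	IO_SPAN(fio, lio) = 12K + 4K - 0    = 16K  (aggregate extent)
 *	IO_GAP(fio, lio)  = -(0 + 4K - 12K) =  8K  (hole between them)
 *
 * Adjacent i/os yield IO_GAP == 0; overlapping i/os yield a negative
 * gap, which still passes any "<= maxgap" test below.
 */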
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq, uint64_t pending_limit)
{
	zio_t *fio, *lio, *aio, *dio, *nio, *mio;
	avl_tree_t *t;
	vdev_io_t *vi;
	int flags;
	uint64_t maxspan = MIN(zfs_vdev_aggregation_limit, SPA_MAXBLOCKSIZE);
	uint64_t maxgap;
	int stretch;

again:
	ASSERT(MUTEX_HELD(&vq->vq_lock));

	if (avl_numnodes(&vq->vq_pending_tree) >= pending_limit ||
	    avl_numnodes(&vq->vq_deadline_tree) == 0)
		return (NULL);

	fio = lio = avl_first(&vq->vq_deadline_tree);

	t = fio->io_vdev_tree;
	flags = fio->io_flags & ZIO_FLAG_AGG_INHERIT;
	maxgap = (t == &vq->vq_read_tree) ? zfs_vdev_read_gap_limit : 0;

	vi = list_head(&vq->vq_io_list);
	if (vi == NULL) {
		vi = zio_vdev_alloc();
		list_insert_head(&vq->vq_io_list, vi);
	}
	if (!(flags & ZIO_FLAG_DONT_AGGREGATE)) {
		/*
		 * We can aggregate I/Os that are sufficiently adjacent and of
		 * the same flavor, as expressed by the AGG_INHERIT flags.
		 * The latter requirement is necessary so that certain
		 * attributes of the I/O, such as whether it's a normal I/O
		 * or a scrub/resilver, can be preserved in the aggregate.
		 * We can include optional I/Os, but don't allow them
		 * to begin a range as they add no benefit in that situation.
		 */

		/*
		 * We keep track of the last non-optional I/O.
		 */
		mio = (fio->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : fio;

		/*
		 * Walk backwards through sufficiently contiguous I/Os
		 * recording the last non-optional I/O.
		 */
		while ((dio = AVL_PREV(t, fio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(dio, lio) <= maxspan &&
		    IO_GAP(dio, fio) <= maxgap) {
			fio = dio;
			if (mio == NULL && !(fio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = fio;
		}

		/*
		 * Skip any initial optional I/Os.
		 */
		while ((fio->io_flags & ZIO_FLAG_OPTIONAL) && fio != lio) {
			fio = AVL_NEXT(t, fio);
			ASSERT(fio != NULL);
		}

		/*
		 * Walk forward through sufficiently contiguous I/Os.
		 */
		while ((dio = AVL_NEXT(t, lio)) != NULL &&
		    (dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
		    IO_SPAN(fio, dio) <= maxspan &&
		    IO_GAP(lio, dio) <= maxgap) {
			lio = dio;
			if (!(lio->io_flags & ZIO_FLAG_OPTIONAL))
				mio = lio;
		}

		/*
		 * Now that we've established the range of the I/O aggregation
		 * we must decide what to do with trailing optional I/Os.
		 * For reads, there's nothing to do.  While we are unable to
		 * aggregate further, it's possible that a trailing optional
		 * I/O would allow the underlying device to aggregate with
		 * subsequent I/Os.  We must therefore determine if the next
		 * non-optional I/O is close enough to make aggregation
		 * worthwhile.
		 */
		stretch = B_FALSE;
		if (t != &vq->vq_read_tree && mio != NULL) {
			nio = lio;
			while ((dio = AVL_NEXT(t, nio)) != NULL &&
			    IO_GAP(nio, dio) == 0 &&
			    IO_GAP(mio, dio) <= zfs_vdev_write_gap_limit) {
				nio = dio;
				if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
					stretch = B_TRUE;
					break;
				}
			}
		}

		if (stretch) {
			/* This may be a no-op. */
			VERIFY((dio = AVL_NEXT(t, lio)) != NULL);
			dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
		} else {
			while (lio != mio && lio != fio) {
				ASSERT(lio->io_flags & ZIO_FLAG_OPTIONAL);
				lio = AVL_PREV(t, lio);
				ASSERT(lio != NULL);
			}
		}
	}
	if (fio != lio) {
		uint64_t size = IO_SPAN(fio, lio);
		ASSERT(size <= maxspan);
		ASSERT(vi != NULL);

		aio = zio_vdev_delegated_io(fio->io_vd, fio->io_offset,
		    vi, size, fio->io_type, ZIO_PRIORITY_AGG,
		    flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
		    vdev_queue_agg_io_done, NULL);
		aio->io_timestamp = fio->io_timestamp;

		nio = fio;
		do {
			dio = nio;
			nio = AVL_NEXT(t, dio);
			ASSERT(dio->io_type == aio->io_type);
			ASSERT(dio->io_vdev_tree == t);

			if (dio->io_flags & ZIO_FLAG_NODATA) {
				ASSERT(dio->io_type == ZIO_TYPE_WRITE);
				bzero((char *)aio->io_data + (dio->io_offset -
				    aio->io_offset), dio->io_size);
			} else if (dio->io_type == ZIO_TYPE_WRITE) {
				bcopy(dio->io_data, (char *)aio->io_data +
				    (dio->io_offset - aio->io_offset),
				    dio->io_size);
			}

			zio_add_child(dio, aio);
			vdev_queue_io_remove(vq, dio);
			zio_vdev_io_bypass(dio);
			zio_execute(dio);
		} while (dio != lio);

		avl_add(&vq->vq_pending_tree, aio);
		list_remove(&vq->vq_io_list, vi);

		return (aio);
	}
	ASSERT(fio->io_vdev_tree == t);
	vdev_queue_io_remove(vq, fio);

	/*
	 * If the I/O is or was optional and therefore has no data, we need to
	 * simply discard it.  We need to drop the vdev queue's lock to avoid a
	 * deadlock that we could encounter since this I/O will complete
	 * immediately.
	 */
	if (fio->io_flags & ZIO_FLAG_NODATA) {
		mutex_exit(&vq->vq_lock);
		zio_vdev_io_bypass(fio);
		zio_execute(fio);
		mutex_enter(&vq->vq_lock);
		goto again;
	}

	avl_add(&vq->vq_pending_tree, fio);

	return (fio);
}
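
/*
 * Editor's sketch (not part of the original file): the gap-limited
 * aggregation walk above, reduced to a toy over offset-sorted,
 * non-overlapping extents.  It applies the same two tests the while
 * loops above express via IO_SPAN()/IO_GAP(): the total span must stay
 * within maxspan and each hole within maxgap.  All names here are
 * hypothetical; the block is not compiled.
 */
#if 0
struct ext {
	uint64_t off;
	uint64_t size;
};

static int
agg_run(const struct ext *e, int n, int first,
    uint64_t maxspan, uint64_t maxgap)
{
	int last = first;

	while (last + 1 < n) {
		const struct ext *next = &e[last + 1];
		uint64_t span = next->off + next->size - e[first].off;
		uint64_t gap = next->off - (e[last].off + e[last].size);

		if (span > maxspan || gap > maxgap)
			break;	/* run would grow too large or skip too far */
		last++;
	}
	return (last);	/* index of the last extent in the aggregate */
}
#endif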
zio_t *
vdev_queue_io(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	zio_t *nio;

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
		return (zio);

	zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;

	if (zio->io_type == ZIO_TYPE_READ)
		zio->io_vdev_tree = &vq->vq_read_tree;
	else
		zio->io_vdev_tree = &vq->vq_write_tree;

	mutex_enter(&vq->vq_lock);

	zio->io_timestamp = gethrtime();
	zio->io_deadline = (zio->io_timestamp >> zfs_vdev_time_shift) +
	    zio->io_priority;

	vdev_queue_io_add(vq, zio);

	nio = vdev_queue_io_to_issue(vq, zfs_vdev_min_pending);

	mutex_exit(&vq->vq_lock);

	if (nio == NULL)
		return (NULL);

	if (nio->io_done == vdev_queue_agg_io_done) {
		zio_nowait(nio);
		return (NULL);
	}

	return (nio);
}
void
vdev_queue_io_done(zio_t *zio)
{
	vdev_queue_t *vq = &zio->io_vd->vdev_queue;
	int i;

	if (zio_injection_enabled)
		delay(SEC_TO_TICK(zio_handle_io_delay(zio)));

	mutex_enter(&vq->vq_lock);

	avl_remove(&vq->vq_pending_tree, zio);

	zio->io_delta = gethrtime() - zio->io_timestamp;
	vq->vq_io_complete_ts = gethrtime();
	vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;

	for (i = 0; i < zfs_vdev_ramp_rate; i++) {
		zio_t *nio = vdev_queue_io_to_issue(vq, zfs_vdev_max_pending);
		if (nio == NULL)
			break;
		mutex_exit(&vq->vq_lock);
		if (nio->io_done == vdev_queue_agg_io_done) {
			zio_nowait(nio);
		} else {
			zio_vdev_io_reissue(nio);
			zio_execute(nio);
		}
		mutex_enter(&vq->vq_lock);
	}

	mutex_exit(&vq->vq_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_max_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_max_pending, "Max pending per-vdev I/Os");

module_param(zfs_vdev_min_pending, int, 0644);
MODULE_PARM_DESC(zfs_vdev_min_pending, "Min pending per-vdev I/Os");

module_param(zfs_vdev_aggregation_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");

module_param(zfs_vdev_time_shift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_time_shift, "Deadline time shift for vdev I/O");

module_param(zfs_vdev_ramp_rate, int, 0644);
MODULE_PARM_DESC(zfs_vdev_ramp_rate, "Exponential I/O issue ramp-up rate");

module_param(zfs_vdev_read_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");

module_param(zfs_vdev_write_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_write_gap_limit, "Aggregate write I/O over gap");
#endif