/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>
/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */
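/*
 * Illustrative sketch (a summary of the functions below, not additional
 * API) of how a read flows through these five operations:
 *
 *	vdev_cache_read(zio)
 *	    hit, line idle     -> vdev_cache_hit()         copy out, bump LRU
 *	    hit, fill pending  -> zio_add_child(zio, fio)  wait on that fill
 *	    miss               -> vdev_cache_allocate()    reserve a line,
 *	                          zio_vdev_delegated_io()  read VCBS bytes,
 *	                          vdev_cache_fill()        satisfy waiters
 *
 * vdev_cache_allocate() may first vdev_cache_evict() the least-recently-
 * used entry if the cache would exceed zfs_vdev_cache_size.
 */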
/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful.  It
 * is better if we disable this.  Once some time has passed, we should
 * actually remove this to simplify the code.  For now we just disable
 * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;
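/*
 * Worked example of the inflation math, assuming the cache were enabled
 * (zfs_vdev_cache_size > 0): a 512-byte read at offset 0x12345200 is
 * below zfs_vdev_cache_max (16KB), so it is inflated into one VCBS-sized
 * (64KB) read of the enclosing aligned region:
 *
 *	cache_offset = P2ALIGN(0x12345200, VCBS)	== 0x12340000
 *	cache_phase  = P2PHASE(0x12345200, VCBS)	== 0x5200
 *
 * The caller's 512 bytes are then copied out of ve_data + cache_phase.
 */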
#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
kstat_t	*vdc_ksp = NULL;
typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};
#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}
static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ddi_time_before(ve1->ve_lastused, ve2->ve_lastused))
		return (-1);
	if (ddi_time_after(ve1->ve_lastused, ve2->ve_lastused))
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}
/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
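/*
 * Note on the size check in vdev_cache_allocate() above: every entry
 * caches exactly VCBS bytes, so the total bytes held are simply the node
 * count shifted by zfs_vdev_cache_bshift; e.g. with the default bshift
 * of 16, 100 entries account for 100 << 16 == 6,553,600 bytes.
 */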
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}
/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	while ((pio = zio_walk_parents(fio)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}
/*
 * Read data from the cache.  Returns B_TRUE on cache hit, B_FALSE on miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, *ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	zio_t *fio;
	ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS));

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve_search->ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
	kmem_free(ve_search, sizeof (vdev_cache_entry_t));

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}
/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
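/*
 * Worked example of the overlap arithmetic in vdev_cache_write() above:
 * a 24KB write with io_start == 0x1e000 has io_end == 0x24000, so
 * min_offset == 0x10000 and max_offset == 0x30000.  For a cached line at
 * ve_offset == 0x10000, start == 0x1e000 and end == 0x20000, so 0x2000
 * bytes are copied from io_data + 0 into ve_data + 0xe000.  For a line
 * at ve_offset == 0x20000, start == 0x20000 and end == 0x24000, so
 * 0x4000 bytes are copied from io_data + 0x2000 into ve_data + 0.
 */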
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}
void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}
void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}
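/*
 * These counters can be read from userland; for example (assuming the
 * SPL kstat interface on Linux, or kstat(1M) on illumos):
 *
 *	cat /proc/spl/kstat/zfs/vdev_cache_stats
 *	kstat -p zfs:0:vdev_cache_stats
 */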
void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_cache_max, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_max, "Inflate reads smaller than max");

module_param(zfs_vdev_cache_size, int, 0444);
MODULE_PARM_DESC(zfs_vdev_cache_size, "Total size of the per-disk cache");

module_param(zfs_vdev_cache_bshift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_bshift, "Shift size to inflate reads to");
#endif