 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright (c) 2013, 2016 by Delphix. All rights reserved.
 */
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>
#include <sys/abd.h>
/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */
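/*
 * A minimal usage sketch (illustrative only, not part of this file's
 * interface): the vdev read path is expected to consult the cache first and
 * skip its own device read whenever vdev_cache_read() returns B_TRUE,
 * roughly:
 *
 *	if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
 *		return;		(hit, delegation, or newly started fill I/O)
 *	...issue the physical read...
 *
 * while the write-completion path calls vdev_cache_write(zio) so that any
 * overlapping cache lines stay coherent.
 */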
/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 *
 * TODO: Note that with the current ZFS code, it turns out that the
 * vdev cache is not helpful, and in some cases actually harmful.  It
 * is better if we disable this.  Once some time has passed, we should
 * actually remove this to simplify the code.  For now we just disable
 * it by setting the zfs_vdev_cache_size to zero.  Note that Solaris 11
 * has made these same changes.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 0;
int zfs_vdev_cache_bshift = 16;
#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
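/*
 * Worked example with the default tunables above (illustrative): VCBS is
 * 1 << 16 = 64KB, so a 512-byte read at offset 0x12345 belongs to the cache
 * block starting at P2ALIGN(0x12345, VCBS) == 0x10000 and is copied out of
 * that block's 64KB buffer at byte offset P2PHASE(0x12345, VCBS) == 0x2345.
 */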
kstat_t	*vdc_ksp = NULL;
typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;
static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};
#define	VDCSTAT_BUMP(stat)	atomic_inc_64(&vdc_stats.stat.value.ui64);
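/*
 * Once vdev_cache_stat_init() below installs the kstat, these counters are
 * typically readable from userland (on Linux, via the SPL kstat interface
 * under /proc/spl/kstat/zfs/vdev_cache_stats; the exact path is
 * platform-dependent).
 */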
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = (const vdev_cache_entry_t *)a1;
	const vdev_cache_entry_t *ve2 = (const vdev_cache_entry_t *)a2;

	return (AVL_CMP(ve1->ve_offset, ve2->ve_offset));
}
static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = (const vdev_cache_entry_t *)a1;
	const vdev_cache_entry_t *ve2 = (const vdev_cache_entry_t *)a2;

	int cmp = AVL_CMP(ve1->ve_lastused, ve2->ve_lastused);
	if (cmp != 0)
		return (cmp);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}
/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);
	ASSERT3P(ve->ve_abd, !=, NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	abd_free(ve->ve_abd);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT3U(ve->ve_hits, !=, 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_abd = abd_alloc_for_io(VCBS, B_TRUE);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT3P(ve->ve_fill_io, ==, NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	abd_copy_off(zio->io_abd, ve->ve_abd, 0, cache_phase, zio->io_size);
}
/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT3U(fio->io_size, ==, VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT3P(ve->ve_fill_io, ==, fio);
	ASSERT3U(ve->ve_offset, ==, fio->io_offset);
	ASSERT3P(ve->ve_abd, ==, fio->io_abd);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	zio_link_t *zl = NULL;
	while ((pio = zio_walk_parents(fio, &zl)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}
/*
 * Read data from the cache.  Returns B_TRUE on a cache hit, B_FALSE on a miss.
 */
boolean_t
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, *ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	zio_t *fio;
	ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS));

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (B_FALSE);

	if (zio->io_size > zfs_vdev_cache_max)
		return (B_FALSE);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (B_FALSE);

	ASSERT3U(cache_phase + zio->io_size, <=, VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve_search->ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
	kmem_free(ve_search, sizeof (vdev_cache_entry_t));

	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (B_FALSE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (B_TRUE);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (B_TRUE);
	}

	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (B_FALSE);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_abd, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_NOW,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (B_TRUE);
}
/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);

		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			abd_copy_off(ve->ve_abd, zio->io_abd,
			    start - ve->ve_offset, start - io_start,
			    end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}
void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}
void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}
void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_cache_max, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_max, "Inflate reads smaller than max");

module_param(zfs_vdev_cache_size, int, 0444);
MODULE_PARM_DESC(zfs_vdev_cache_size, "Total size of the per-disk cache");

module_param(zfs_vdev_cache_bshift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_bshift, "Shift size to inflate reads to");
#endif