/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/kstat.h>
/*
 * Virtual device read-ahead caching.
 *
 * This file implements a simple LRU read-ahead cache.  When the DMU reads
 * a given block, it will often want other, nearby blocks soon thereafter.
 * We take advantage of this by reading a larger disk region and caching
 * the result.  In the best case, this can turn 128 back-to-back 512-byte
 * reads into a single 64k read followed by 127 cache hits; this reduces
 * latency dramatically.  In the worst case, it can turn an isolated 512-byte
 * read into a 64k read, which doesn't affect latency all that much but is
 * terribly wasteful of bandwidth.  A more intelligent version of the cache
 * could keep track of access patterns and not do read-ahead unless it sees
 * at least two temporally close I/Os to the same region.  Currently, only
 * metadata I/O is inflated.  A further enhancement could take advantage of
 * more semantic information about the I/O.  And it could use something
 * faster than an AVL tree; that was chosen solely for convenience.
 *
 * There are five cache operations: allocate, fill, read, write, evict.
 *
 * (1) Allocate.  This reserves a cache entry for the specified region.
 *     We separate the allocate and fill operations so that multiple threads
 *     don't generate I/O for the same cache miss.
 *
 * (2) Fill.  When the I/O for a cache miss completes, the fill routine
 *     places the data in the previously allocated cache entry.
 *
 * (3) Read.  Read data from the cache.
 *
 * (4) Write.  Update cache contents after write completion.
 *
 * (5) Evict.  When allocating a new entry, we evict the oldest (LRU) entry
 *     if the total cache size exceeds zfs_vdev_cache_size.
 */
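/*
 * Worked example: with the default zfs_vdev_cache_bshift of 16, each
 * cache line (VCBS) is 64KB.  A 512-byte read at offset 0x12345e00 maps
 * to the line starting at P2ALIGN(0x12345e00, VCBS) = 0x12340000, at
 * phase P2PHASE(0x12345e00, VCBS) = 0x5e00 within that line's buffer.
 */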
/*
 * These tunables are for performance analysis.
 */
/*
 * All i/os smaller than zfs_vdev_cache_max will be turned into
 * 1<<zfs_vdev_cache_bshift byte reads by the vdev_cache (aka software
 * track buffer).  At most zfs_vdev_cache_size bytes will be kept in each
 * vdev's vdev_cache.
 */
int zfs_vdev_cache_max = 1<<14;			/* 16KB */
int zfs_vdev_cache_size = 10ULL << 20;		/* 10MB */
int zfs_vdev_cache_bshift = 16;

#define	VCBS (1 << zfs_vdev_cache_bshift)	/* 64KB */
kstat_t	*vdc_ksp = NULL;

typedef struct vdc_stats {
	kstat_named_t vdc_stat_delegations;
	kstat_named_t vdc_stat_hits;
	kstat_named_t vdc_stat_misses;
} vdc_stats_t;

static vdc_stats_t vdc_stats = {
	{ "delegations",	KSTAT_DATA_UINT64 },
	{ "hits",		KSTAT_DATA_UINT64 },
	{ "misses",		KSTAT_DATA_UINT64 }
};

#define	VDCSTAT_BUMP(stat)	atomic_add_64(&vdc_stats.stat.value.ui64, 1);
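/*
 * AVL comparator: order cache entries by starting offset, so the line
 * covering a given I/O can be found in O(log n).
 */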
static int
vdev_cache_offset_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_offset < ve2->ve_offset)
		return (-1);
	if (ve1->ve_offset > ve2->ve_offset)
		return (1);
	return (0);
}
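/*
 * AVL comparator: order cache entries by last-use time, so the least
 * recently used entry is always first in vc_lastused_tree.
 */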
static int
vdev_cache_lastused_compare(const void *a1, const void *a2)
{
	const vdev_cache_entry_t *ve1 = a1;
	const vdev_cache_entry_t *ve2 = a2;

	if (ve1->ve_lastused < ve2->ve_lastused)
		return (-1);
	if (ve1->ve_lastused > ve2->ve_lastused)
		return (1);

	/*
	 * Among equally old entries, sort by offset to ensure uniqueness.
	 */
	return (vdev_cache_offset_compare(a1, a2));
}
/*
 * Evict the specified entry from the cache.
 */
static void
vdev_cache_evict(vdev_cache_t *vc, vdev_cache_entry_t *ve)
{
	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);
	ASSERT(ve->ve_data != NULL);

	avl_remove(&vc->vc_lastused_tree, ve);
	avl_remove(&vc->vc_offset_tree, ve);
	zio_buf_free(ve->ve_data, VCBS);
	kmem_free(ve, sizeof (vdev_cache_entry_t));
}
/*
 * Allocate an entry in the cache.  At this point we don't have the data,
 * we're just creating a placeholder so that multiple threads don't all
 * go off and read the same blocks.
 */
static vdev_cache_entry_t *
vdev_cache_allocate(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	uint64_t offset = P2ALIGN(zio->io_offset, VCBS);
	vdev_cache_entry_t *ve;

	ASSERT(MUTEX_HELD(&vc->vc_lock));

	if (zfs_vdev_cache_size == 0)
		return (NULL);

	/*
	 * If adding a new entry would exceed the cache size,
	 * evict the oldest entry (LRU).
	 */
	if ((avl_numnodes(&vc->vc_lastused_tree) << zfs_vdev_cache_bshift) >
	    zfs_vdev_cache_size) {
		ve = avl_first(&vc->vc_lastused_tree);
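		/*
		 * The oldest entry is still being filled and can't be
		 * evicted yet, so decline the allocation; the caller
		 * falls back to an uncached read.
		 */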
		if (ve->ve_fill_io != NULL)
			return (NULL);
		ASSERT(ve->ve_hits != 0);
		vdev_cache_evict(vc, ve);
	}

	ve = kmem_zalloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve->ve_offset = offset;
	ve->ve_lastused = ddi_get_lbolt();
	ve->ve_data = zio_buf_alloc(VCBS);

	avl_add(&vc->vc_offset_tree, ve);
	avl_add(&vc->vc_lastused_tree, ve);

	return (ve);
}
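/*
 * Satisfy a read from a filled cache entry: refresh the entry's LRU
 * position and copy the requested range into the caller's buffer.
 */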
static void
vdev_cache_hit(vdev_cache_t *vc, vdev_cache_entry_t *ve, zio_t *zio)
{
	uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);

	ASSERT(MUTEX_HELD(&vc->vc_lock));
	ASSERT(ve->ve_fill_io == NULL);

	if (ve->ve_lastused != ddi_get_lbolt()) {
		avl_remove(&vc->vc_lastused_tree, ve);
		ve->ve_lastused = ddi_get_lbolt();
		avl_add(&vc->vc_lastused_tree, ve);
	}

	ve->ve_hits++;
	bcopy(ve->ve_data + cache_phase, zio->io_data, zio->io_size);
}
/*
 * Fill a previously allocated cache entry with data.
 */
static void
vdev_cache_fill(zio_t *fio)
{
	vdev_t *vd = fio->io_vd;
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve = fio->io_private;
	zio_t *pio;

	ASSERT(fio->io_size == VCBS);

	/*
	 * Add data to the cache.
	 */
	mutex_enter(&vc->vc_lock);

	ASSERT(ve->ve_fill_io == fio);
	ASSERT(ve->ve_offset == fio->io_offset);
	ASSERT(ve->ve_data == fio->io_data);

	ve->ve_fill_io = NULL;

	/*
	 * Even if this cache line was invalidated by a missed write update,
	 * any reads that were queued up before the missed update are still
	 * valid, so we can satisfy them from this line before we evict it.
	 */
	while ((pio = zio_walk_parents(fio)) != NULL)
		vdev_cache_hit(vc, ve, pio);

	if (fio->io_error || ve->ve_missed_update)
		vdev_cache_evict(vc, ve);

	mutex_exit(&vc->vc_lock);
}
/*
 * Read data from the cache.  Returns 0 on cache hit, errno on a miss.
 */
int
vdev_cache_read(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, *ve_search;
	uint64_t cache_offset = P2ALIGN(zio->io_offset, VCBS);
	ASSERTV(uint64_t cache_phase = P2PHASE(zio->io_offset, VCBS);)
	zio_t *fio;

	ASSERT(zio->io_type == ZIO_TYPE_READ);

	if (zio->io_flags & ZIO_FLAG_DONT_CACHE)
		return (EINVAL);

	if (zio->io_size > zfs_vdev_cache_max)
		return (EOVERFLOW);

	/*
	 * If the I/O straddles two or more cache blocks, don't cache it.
	 */
	if (P2BOUNDARY(zio->io_offset, zio->io_size, VCBS))
		return (EXDEV);

	ASSERT(cache_phase + zio->io_size <= VCBS);

	mutex_enter(&vc->vc_lock);

	ve_search = kmem_alloc(sizeof (vdev_cache_entry_t), KM_SLEEP);
	ve_search->ve_offset = cache_offset;
	ve = avl_find(&vc->vc_offset_tree, ve_search, NULL);
	kmem_free(ve_search, sizeof (vdev_cache_entry_t));
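	/*
	 * If the lookup succeeded there are three cases: the line is
	 * stale (a write raced an in-flight fill), the line is still
	 * being filled (delegate to the fill I/O), or the line is valid
	 * (a plain hit).
	 */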
	if (ve != NULL) {
		if (ve->ve_missed_update) {
			mutex_exit(&vc->vc_lock);
			return (ESTALE);
		}

		if ((fio = ve->ve_fill_io) != NULL) {
			zio_vdev_io_bypass(zio);
			zio_add_child(zio, fio);
			mutex_exit(&vc->vc_lock);
			VDCSTAT_BUMP(vdc_stat_delegations);
			return (0);
		}

		vdev_cache_hit(vc, ve, zio);
		zio_vdev_io_bypass(zio);

		mutex_exit(&vc->vc_lock);
		VDCSTAT_BUMP(vdc_stat_hits);
		return (0);
	}
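	/*
	 * Cache miss: reserve a placeholder entry and issue a delegated
	 * read of the whole cache line; this zio becomes a child of the
	 * fill I/O, as do any later readers of the same line.
	 */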
	ve = vdev_cache_allocate(zio);

	if (ve == NULL) {
		mutex_exit(&vc->vc_lock);
		return (ENOMEM);
	}

	fio = zio_vdev_delegated_io(zio->io_vd, cache_offset,
	    ve->ve_data, VCBS, ZIO_TYPE_READ, ZIO_PRIORITY_CACHE_FILL,
	    ZIO_FLAG_DONT_CACHE, vdev_cache_fill, ve);

	ve->ve_fill_io = fio;
	zio_vdev_io_bypass(zio);
	zio_add_child(zio, fio);

	mutex_exit(&vc->vc_lock);
	zio_nowait(fio);
	VDCSTAT_BUMP(vdc_stat_misses);

	return (0);
}
/*
 * Update cache contents upon write completion.
 */
void
vdev_cache_write(zio_t *zio)
{
	vdev_cache_t *vc = &zio->io_vd->vdev_cache;
	vdev_cache_entry_t *ve, ve_search;
	uint64_t io_start = zio->io_offset;
	uint64_t io_end = io_start + zio->io_size;
	uint64_t min_offset = P2ALIGN(io_start, VCBS);
	uint64_t max_offset = P2ROUNDUP(io_end, VCBS);
	avl_index_t where;

	ASSERT(zio->io_type == ZIO_TYPE_WRITE);

	mutex_enter(&vc->vc_lock);

	ve_search.ve_offset = min_offset;
	ve = avl_find(&vc->vc_offset_tree, &ve_search, &where);

	if (ve == NULL)
		ve = avl_nearest(&vc->vc_offset_tree, where, AVL_AFTER);

	while (ve != NULL && ve->ve_offset < max_offset) {
		uint64_t start = MAX(ve->ve_offset, io_start);
		uint64_t end = MIN(ve->ve_offset + VCBS, io_end);
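		/*
		 * If a fill is still in flight we can't safely update
		 * the buffer, so mark the line stale; vdev_cache_fill()
		 * will evict it once the fill completes.
		 */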
		if (ve->ve_fill_io != NULL) {
			ve->ve_missed_update = 1;
		} else {
			bcopy((char *)zio->io_data + start - io_start,
			    ve->ve_data + start - ve->ve_offset, end - start);
		}
		ve = AVL_NEXT(&vc->vc_offset_tree, ve);
	}
	mutex_exit(&vc->vc_lock);
}
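/*
 * Evict every entry from the cache, e.g. when the vdev is closed or
 * reopened.
 */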
void
vdev_cache_purge(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;
	vdev_cache_entry_t *ve;

	mutex_enter(&vc->vc_lock);
	while ((ve = avl_first(&vc->vc_offset_tree)) != NULL)
		vdev_cache_evict(vc, ve);
	mutex_exit(&vc->vc_lock);
}
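/*
 * Initialize a vdev's cache: one AVL tree indexed by offset for lookup
 * and one indexed by last use for LRU eviction.
 */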
void
vdev_cache_init(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	mutex_init(&vc->vc_lock, NULL, MUTEX_DEFAULT, NULL);

	avl_create(&vc->vc_offset_tree, vdev_cache_offset_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_offset_node));

	avl_create(&vc->vc_lastused_tree, vdev_cache_lastused_compare,
	    sizeof (vdev_cache_entry_t),
	    offsetof(struct vdev_cache_entry, ve_lastused_node));
}
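/*
 * Tear down a vdev's cache: evict all entries, then destroy the trees
 * and the lock.
 */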
void
vdev_cache_fini(vdev_t *vd)
{
	vdev_cache_t *vc = &vd->vdev_cache;

	vdev_cache_purge(vd);

	avl_destroy(&vc->vc_offset_tree);
	avl_destroy(&vc->vc_lastused_tree);

	mutex_destroy(&vc->vc_lock);
}
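/*
 * Register the vdev cache kstats; on Linux these counters typically
 * appear under /proc/spl/kstat/zfs/vdev_cache_stats.
 */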
void
vdev_cache_stat_init(void)
{
	vdc_ksp = kstat_create("zfs", 0, "vdev_cache_stats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (vdc_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (vdc_ksp != NULL) {
		vdc_ksp->ks_data = &vdc_stats;
		kstat_install(vdc_ksp);
	}
}
void
vdev_cache_stat_fini(void)
{
	if (vdc_ksp != NULL) {
		kstat_delete(vdc_ksp);
		vdc_ksp = NULL;
	}
}
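/*
 * Module parameters registered with mode 0644 can be changed at runtime,
 * e.g. "echo 0 > /sys/module/zfs/parameters/zfs_vdev_cache_max";
 * zfs_vdev_cache_size (0444) is read-only once the module is loaded.
 */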
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_cache_max, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_max, "Inflate reads smaller than max");

module_param(zfs_vdev_cache_size, int, 0444);
MODULE_PARM_DESC(zfs_vdev_cache_size, "Total size of the per-disk cache");

module_param(zfs_vdev_cache_bshift, int, 0644);
MODULE_PARM_DESC(zfs_vdev_cache_bshift, "Shift size to inflate reads to");
#endif