/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
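/*
 * Illustrative sketch (not part of the original source): the
 * variable-block-size eviction described in point 3 above amounts to
 * walking an eviction list from the tail and accumulating evictable
 * blocks until their total size covers the miss:
 *
 *	int64_t needed = miss_size;
 *	for (ab = list_tail(list); ab != NULL && needed > 0; ab = prev) {
 *		prev = list_prev(list, ab);
 *		if (evictable(ab)) {
 *			evict(ab);
 *			needed -= ab->b_size;
 *		}
 *	}
 *
 * The names "needed", "evictable" and "evict" are hypothetical; the
 * real logic lives in arc_evict() below.
 */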
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * It is also possible to register a callback which is run when the
 * arc_meta_limit is reached and no buffers can be safely evicted.  In
 * this case the arc user should drop a reference on some arc buffers so
 * they can be reclaimed and the arc_meta_limit honored.  For example,
 * when using the ZPL each dentry holds a reference on a znode.  These
 * dentries must be pruned before the arc buffer holding the znode can
 * be safely evicted.
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
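/*
 * Illustrative sketch (not from the original source) of the locking
 * rule stated above: when a hash table lock must be taken while an arc
 * list lock is already held, mutex_tryenter() is used so the thread
 * never blocks in the wrong lock order:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	hash_lock = HDR_LOCK(hdr);
 *	if (mutex_tryenter(hash_lock)) {
 *		(operate on the buffer)
 *		mutex_exit(hash_lock);
 *	}
 *	(otherwise skip this buffer; eviction is best effort)
 *	mutex_exit(&state->arcs_mtx);
 *
 * arc_evict() below follows exactly this pattern.
 */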
#include <sys/zfs_context.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/dmu_tx.h>
#include <zfs_fletcher.h>
static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;
/* number of bytes to prune from caches when arc_meta_limit is reached */
uint_t arc_meta_prune = 1048576;
typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;
/* number of seconds before growing cache again */
static int		arc_grow_retry = 5;

/* expiration time for arc_no_grow */
static clock_t		arc_grow_time = 0;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;
/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_meta_prune = 0;
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_anon_size;
	kstat_named_t arcstat_anon_evict_data;
	kstat_named_t arcstat_anon_evict_metadata;
	kstat_named_t arcstat_mru_size;
	kstat_named_t arcstat_mru_evict_data;
	kstat_named_t arcstat_mru_evict_metadata;
	kstat_named_t arcstat_mru_ghost_size;
	kstat_named_t arcstat_mru_ghost_evict_data;
	kstat_named_t arcstat_mru_ghost_evict_metadata;
	kstat_named_t arcstat_mfu_size;
	kstat_named_t arcstat_mfu_evict_data;
	kstat_named_t arcstat_mfu_evict_metadata;
	kstat_named_t arcstat_mfu_ghost_size;
	kstat_named_t arcstat_mfu_ghost_evict_data;
	kstat_named_t arcstat_mfu_ghost_evict_metadata;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_memory_direct_count;
	kstat_named_t arcstat_memory_indirect_count;
	kstat_named_t arcstat_no_grow;
	kstat_named_t arcstat_tempreserve;
	kstat_named_t arcstat_loaned_bytes;
	kstat_named_t arcstat_prune;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_meta_max;
} arc_stats_t;
static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "anon_size",			KSTAT_DATA_UINT64 },
	{ "anon_evict_data",		KSTAT_DATA_UINT64 },
	{ "anon_evict_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_size",			KSTAT_DATA_UINT64 },
	{ "mru_evict_data",		KSTAT_DATA_UINT64 },
	{ "mru_evict_metadata",		KSTAT_DATA_UINT64 },
	{ "mru_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mru_ghost_evict_data",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_evict_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_size",			KSTAT_DATA_UINT64 },
	{ "mfu_evict_data",		KSTAT_DATA_UINT64 },
	{ "mfu_evict_metadata",		KSTAT_DATA_UINT64 },
	{ "mfu_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evict_data",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evict_metadata",	KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "memory_direct_count",	KSTAT_DATA_UINT64 },
	{ "memory_indirect_count",	KSTAT_DATA_UINT64 },
	{ "arc_no_grow",		KSTAT_DATA_UINT64 },
	{ "arc_tempreserve",		KSTAT_DATA_UINT64 },
	{ "arc_loaned_bytes",		KSTAT_DATA_UINT64 },
	{ "arc_prune",			KSTAT_DATA_UINT64 },
	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
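/*
 * For example, the call in arc_buf_add_ref() below:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits, depending on the two conditions.
 */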
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;
/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_no_grow	ARCSTAT(arcstat_no_grow)
#define	arc_tempreserve	ARCSTAT(arcstat_tempreserve)
#define	arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
#define	arc_meta_used	ARCSTAT(arcstat_meta_used)
#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit)
#define	arc_meta_max	ARCSTAT(arcstat_meta_max)
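/*
 * With these definitions, ordinary-looking expressions read and write
 * the kstats directly; e.g. arc_adjust() below computes the amount to
 * evict as:
 *
 *	adjustment = arc_size - arc_c;
 *
 * which expands to arithmetic on arc_stats.arcstat_size.value.ui64 and
 * arc_stats.arcstat_c.value.ui64, so the exported statistics can never
 * drift from the values the code actually uses.
 */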
typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	arc_done_func_t		*acb_done;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	arc_done_func_t		*awcb_ready;
	arc_done_func_t		*awcb_done;
};
struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};
static list_t arc_prune_list;
static kmutex_t arc_prune_mtx;
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */
#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
/*
 * Hash table routines
 */

#define	HT_LOCK_ALIGN	64
#define	HT_LOCK_PAD	(P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[HT_LOCK_PAD];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];
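/*
 * Illustrative lookup pattern (a sketch, not from the original source):
 * buf_hash_find() below returns with the appropriate hash mutex held on
 * a hit, so the caller is responsible for dropping it when done:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		(fields of hdr are stable while hash_lock is held)
 *		mutex_exit(hash_lock);
 *	}
 */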
#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE;	/* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM;		/* # of dev writes */
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
int l2arc_noprefetch = B_TRUE;			/* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE;			/* turbo warmup */
int l2arc_norw = B_TRUE;			/* no reads during writes */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	boolean_t		l2ad_writing;	/* currently writing */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};
typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}
#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel.
	 */
	vmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}
/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&buf->b_arc_node);
	list_link_init(&buf->b_l2node);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}
/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	rw_destroy(&buf->b_data_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel.
	 */
	buf_hash_table.ht_table =
	    vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, NULL, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */
static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}
static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
	    KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);
}
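/*
 * A typical debug-mode sequence (a sketch, assuming a writer that owns
 * an anonymous buffer): thaw before modifying, freeze once the contents
 * are final; a later modification without a thaw panics in
 * arc_cksum_verify():
 *
 *	arc_buf_thaw(buf);
 *	(modify buf->b_data)
 *	arc_buf_freeze(buf);
 */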
static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}
static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}
/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}
void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}
arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
= "onloan";
1309 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1310 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1311 * buffers must be returned to the arc before they can be used by the DMU or
1315 arc_loan_buf(spa_t
*spa
, int size
)
1319 buf
= arc_buf_alloc(spa
, size
, arc_onloan_tag
, ARC_BUFC_DATA
);
1321 atomic_add_64(&arc_loaned_bytes
, size
);
1326 * Return a loaned arc buffer to the arc.
1329 arc_return_buf(arc_buf_t
*buf
, void *tag
)
1331 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1333 ASSERT(buf
->b_data
!= NULL
);
1334 (void) refcount_add(&hdr
->b_refcnt
, tag
);
1335 (void) refcount_remove(&hdr
->b_refcnt
, arc_onloan_tag
);
1337 atomic_add_64(&arc_loaned_bytes
, -hdr
->b_size
);
1340 /* Detach an arc_buf from a dbuf (tag) */
1342 arc_loan_inuse_buf(arc_buf_t
*buf
, void *tag
)
1346 ASSERT(buf
->b_data
!= NULL
);
1348 (void) refcount_add(&hdr
->b_refcnt
, arc_onloan_tag
);
1349 (void) refcount_remove(&hdr
->b_refcnt
, tag
);
1350 buf
->b_efunc
= NULL
;
1351 buf
->b_private
= NULL
;
1353 atomic_add_64(&arc_loaned_bytes
, hdr
->b_size
);
1357 arc_buf_clone(arc_buf_t
*from
)
1360 arc_buf_hdr_t
*hdr
= from
->b_hdr
;
1361 uint64_t size
= hdr
->b_size
;
1363 ASSERT(hdr
->b_state
!= arc_anon
);
1365 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
1368 buf
->b_efunc
= NULL
;
1369 buf
->b_private
= NULL
;
1370 buf
->b_next
= hdr
->b_buf
;
1372 arc_get_data_buf(buf
);
1373 bcopy(from
->b_data
, buf
->b_data
, size
);
1374 hdr
->b_datacnt
+= 1;
void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}
/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority.  l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
		if (!buflist_held) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
		}

		if (l2hdr != NULL) {
			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
			if (hdr->b_state == arc_l2c_only)
				l2arc_hdr_stat_remove();
			hdr->b_l2hdr = NULL;
		}

		if (!buflist_held)
			mutex_exit(&l2arc_buflist_mtx);
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		buf_discard_identity(hdr);
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			mutex_enter(&buf->b_evict_lock);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&buf->b_evict_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	if (hdr->b_thawed) {
		kmem_free(hdr->b_thawed, 1);
		hdr->b_thawed = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}
void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			arc_hdr_destroy(hdr);
	}
}
int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		ASSERT(hdr->b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}
int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}
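/*
 * Illustrative buffer lifecycle (a sketch, not from the original
 * source), using the interfaces defined above:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	(fill buf->b_data; the buffer is anonymous and ref'd by tag)
 *	...
 *	(void) arc_buf_remove_ref(buf, tag);
 *
 * Once the last reference is dropped, the buffer either remains on the
 * cache lists, where it becomes evictable, or is freed if anonymous.
 */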
/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    ddi_get_lbolt() - ab->b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!mutex_tryenter(&buf->b_evict_lock)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					mutex_exit(&buf->b_evict_lock);
				} else {
					mutex_exit(&buf->b_evict_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x\n",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, 0, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, 0, todelete);
		}
	}

	return (stolen);
}
/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	arc_buf_hdr_t marker;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
	bzero(&marker, sizeof (marker));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;

		/* ignore markers */
		if (ab->b_spa == 0)
			continue;

		hash_lock = HDR_LOCK(ab);
		/* caller may be trying to modify this buffer, skip it */
		if (MUTEX_HELD(hash_lock))
			continue;
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else if (bytes < 0) {
			/*
			 * Insert a list marker and then wait for the
			 * hash lock to become available.  Once it's
			 * available, restart from where we left off.
			 */
			list_insert_after(list, ab, &marker);
			mutex_exit(&state->arcs_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			mutex_enter(&state->arcs_mtx);
			ab_prev = list_prev(list, &marker);
			list_remove(list, &marker);
		} else {
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped)
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p\n",
		    (longlong_t)bytes_deleted, state);
}
static void
arc_adjust(void)
{
	int64_t adjustment, delta;

	/*
	 * Adjust MRU size
	 */

	adjustment = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
	    arc_p));

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
		(void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		(void) arc_evict(arc_mru, 0, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 */

	adjustment = arc_size - arc_c;

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
		(void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t delta = MIN(adjustment,
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
		(void) arc_evict(arc_mfu, 0, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 */

	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mru_ghost, 0, delta);
	}

	adjustment =
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mfu_ghost, 0, delta);
	}
}
/*
 * Request that arc user drop references so that N bytes can be released
 * from the cache.  This provides a mechanism to ensure the arc can honor
 * the arc_meta_limit and reclaim buffers which are pinned in the cache
 * by higher layers.  (i.e. the zpl)
 */
static void
arc_do_user_prune(int64_t adjustment)
{
	arc_prune_func_t *func;
	void *private;
	arc_prune_t *cp, *np;

	mutex_enter(&arc_prune_mtx);

	cp = list_head(&arc_prune_list);
	while (cp != NULL) {
		func = cp->p_pfunc;
		private = cp->p_private;
		np = list_next(&arc_prune_list, cp);
		refcount_add(&cp->p_refcnt, func);
		mutex_exit(&arc_prune_mtx);

		if (func != NULL)
			func(adjustment, private);

		mutex_enter(&arc_prune_mtx);

		/* User removed prune callback concurrently with execution */
		if (refcount_remove(&cp->p_refcnt, func) == 0) {
			ASSERT(!list_link_active(&cp->p_node));
			refcount_destroy(&cp->p_refcnt);
			kmem_free(cp, sizeof (*cp));
		}

		cp = np;
	}

	ARCSTAT_BUMP(arcstat_prune);
	mutex_exit(&arc_prune_mtx);
}
static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = NULL;
		mutex_exit(&buf->b_evict_lock);
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}
/*
 * Evict only meta data objects from the cache leaving the data objects.
 * This is only used to enforce the tunable arc_meta_limit; if we are
 * unable to evict enough buffers, notify the user via the prune callback.
 */
static void
arc_adjust_meta(int64_t adjustment, boolean_t may_prune)
{
	int64_t delta;

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_METADATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_METADATA);
		adjustment -= delta;
	}

	if (may_prune && (adjustment > 0) && (arc_meta_used > arc_meta_limit))
		arc_do_user_prune(arc_meta_prune);
}
/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	uint64_t guid = 0;

	if (spa)
		guid = spa_guid(spa);

	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, guid, -1);
	arc_evict_ghost(arc_mfu_ghost, guid, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}
static void
arc_shrink(uint64_t bytes)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

		to_free = bytes ? bytes : arc_c >> arc_shrink_shift;

		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}
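/*
 * For example (illustrative numbers only): with arc_c = 8G, arc_c_min = 1G,
 * arc_shrink_shift = 5 and bytes = 0, to_free = 8G >> 5 = 256M; since
 * 8G > 1G + 256M, the target drops to 7.75G, and arc_p is trimmed by
 * arc_p >> 5 as well.  A compiled-out sketch of the target update
 * (hypothetical helper, not part of the original code):
 */
#if 0
static uint64_t
arc_shrink_target_example(uint64_t c, uint64_t c_min, uint64_t bytes,
    int shift)
{
	uint64_t to_free = bytes ? bytes : c >> shift;

	return ((c > c_min + to_free) ? c - to_free : c_min);
}
#endif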
static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat, uint64_t bytes)
{
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink(bytes);

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}

	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}
/*
 * Unlike other ZFS implementations this thread is only responsible for
 * adapting the target ARC size on Linux.  The responsibility for memory
 * reclamation has been entirely delegated to the arc_shrinker_func()
 * which is registered with the VM.  To reflect this change in behavior
 * the arc_reclaim thread has been renamed to arc_adapt.
 */
static void
arc_adapt_thread(void)
{
	callb_cpr_t cpr;
	int64_t prune;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
#ifndef _KERNEL
		arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;

		if (spa_get_random(100) == 0) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
			}

			/* reset the growth delay for every reclaim */
			arc_grow_time = ddi_get_lbolt() + (arc_grow_retry * hz);

			arc_kmem_reap_now(last_reclaim, 0);
			arc_warm = B_TRUE;
		}
#endif /* !_KERNEL */

		/* No recent memory pressure, allow the ARC to grow. */
		if (arc_no_grow && ddi_get_lbolt() >= arc_grow_time)
			arc_no_grow = FALSE;

		/*
		 * Keep meta data usage within limits; arc_shrink() is not
		 * used to avoid collapsing the arc_c value when only the
		 * arc_meta_limit is being exceeded.
		 */
		prune = (int64_t)arc_meta_used - (int64_t)arc_meta_limit;
		if (prune > 0)
			arc_adjust_meta(prune, B_TRUE);

		arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_interruptible(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
}
/*
 * Determine the amount of memory eligible for eviction contained in the
 * ARC.  All clean data reported by the ghost lists can always be safely
 * evicted.  Due to arc_c_min, the same does not hold for all clean data
 * contained by the regular mru and mfu lists.
 *
 * In the case of the regular mru and mfu lists, we need to report as
 * much clean data as possible, such that evicting that same reported
 * data will not bring arc_size below arc_c_min.  Thus, in certain
 * circumstances, the total amount of clean data in the mru and mfu
 * lists might not actually be evictable.
 *
 * The following two distinct cases are accounted for:
 *
 * 1. The sum of the amount of dirty data contained by both the mru and
 *    mfu lists, plus the ARC's other accounting (e.g. the anon list),
 *    is greater than or equal to arc_c_min.
 *    (i.e. amount of dirty data >= arc_c_min)
 *
 *    This is the easy case; all clean data contained by the mru and mfu
 *    lists is evictable.  Evicting all clean data can only drop arc_size
 *    to the amount of dirty data, which is at least arc_c_min.
 *
 * 2. The sum of the amount of dirty data contained by both the mru and
 *    mfu lists, plus the ARC's other accounting (e.g. the anon list),
 *    is less than arc_c_min.
 *    (i.e. arc_c_min > amount of dirty data)
 *
 *    2.1. arc_size is greater than or equal to arc_c_min.
 *         (i.e. arc_size >= arc_c_min > amount of dirty data)
 *
 *         In this case, not all clean data from the regular mru and mfu
 *         lists is actually evictable; we must leave enough clean data
 *         to keep arc_size above arc_c_min.  Thus, the maximum amount of
 *         evictable data from the two lists combined, is exactly the
 *         difference between arc_size and arc_c_min.
 *
 *    2.2. arc_size is less than arc_c_min
 *         (i.e. arc_c_min > arc_size > amount of dirty data)
 *
 *         In this case, none of the data contained in the mru and mfu
 *         lists is evictable, even if it's clean.  Since arc_size is
 *         already below arc_c_min, evicting any more would only
 *         increase this negative difference.
 */
static uint64_t
arc_evictable_memory(void)
{
	uint64_t arc_clean =
	    arc_mru->arcs_lsize[ARC_BUFC_DATA] +
	    arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
	    arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
	    arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
	uint64_t ghost_clean =
	    arc_mru_ghost->arcs_lsize[ARC_BUFC_DATA] +
	    arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] +
	    arc_mfu_ghost->arcs_lsize[ARC_BUFC_DATA] +
	    arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA];
	uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);

	if (arc_dirty >= arc_c_min)
		return (ghost_clean + arc_clean);

	return (ghost_clean + MAX((int64_t)arc_size - (int64_t)arc_c_min, 0));
}
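/*
 * Worked example of the cases above (illustrative numbers only): with
 * arc_size = 4G, clean (mru+mfu lsize) = 1G and arc_c_min = 2G, dirty is
 * 3G >= arc_c_min, so all 1G of clean data plus the ghost totals are
 * reported (case 1).  If instead clean = 3.5G, then dirty = 0.5G <
 * arc_c_min while arc_size >= arc_c_min, so only arc_size - arc_c_min =
 * 2G of the clean data is reported (case 2.1).  A compiled-out
 * restatement (hypothetical helper, not part of the original code):
 */
#if 0
static uint64_t
evictable_example(uint64_t size, uint64_t clean, uint64_t ghost_clean,
    uint64_t c_min)
{
	uint64_t dirty = (size > clean) ? size - clean : 0;

	if (dirty >= c_min)				/* case 1 */
		return (ghost_clean + clean);
	if (size >= c_min)				/* case 2.1 */
		return (ghost_clean + (size - c_min));
	return (ghost_clean);				/* case 2.2 */
}
#endif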
static int
__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
	uint64_t pages;

	/* The arc is considered warm once reclaim has occurred */
	if (unlikely(arc_warm == B_FALSE))
		return (0);

	/* Return the potential number of reclaimable pages */
	pages = btop(arc_evictable_memory());
	if (sc->nr_to_scan == 0)
		return (pages);

	/* Not allowed to perform filesystem reclaim */
	if (!(sc->gfp_mask & __GFP_FS))
		return (-1);

	/* Reclaim in progress */
	if (mutex_tryenter(&arc_reclaim_thr_lock) == 0)
		return (-1);

	/*
	 * Evict the requested number of pages by shrinking arc_c the
	 * requested amount.  If there is nothing left to evict just
	 * reap whatever we can from the various arc slabs.
	 */
	if (pages > 0) {
		arc_kmem_reap_now(ARC_RECLAIM_AGGR, ptob(sc->nr_to_scan));
		pages = btop(arc_evictable_memory());
	} else {
		arc_kmem_reap_now(ARC_RECLAIM_CONS, ptob(sc->nr_to_scan));
		pages = -1;
	}

	/*
	 * When direct reclaim is observed it usually indicates a rapid
	 * increase in memory pressure.  This occurs because the kswapd
	 * threads were unable to asynchronously keep enough free memory
	 * available.  In this case set arc_no_grow to briefly pause arc
	 * growth to avoid compounding the memory pressure.
	 */
	if (current_is_kswapd()) {
		ARCSTAT_BUMP(arcstat_memory_indirect_count);
	} else {
		arc_no_grow = B_TRUE;
		arc_grow_time = ddi_get_lbolt() + (arc_grow_retry * hz);
		ARCSTAT_BUMP(arcstat_memory_direct_count);
	}

	mutex_exit(&arc_reclaim_thr_lock);

	return (pages);
}

SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);

SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
#endif /* _KERNEL */
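/*
 * Sketch of how the VM is expected to drive the shrinker above (an
 * assumption about the two-phase count/scan convention of the old
 * single-callback shrinker API, not code from this file): a first call
 * with sc->nr_to_scan == 0 only queries the count of reclaimable pages,
 * and a follow-up call asks for that many pages to be reclaimed.
 */
#if 0
	/* hypothetical caller, compiled out */
	struct shrink_control sc = { .nr_to_scan = 0, .gfp_mask = GFP_KERNEL };
	int count = __arc_shrinker_func(NULL, &sc);	/* count phase */

	sc.nr_to_scan = count;				/* scan phase */
	(void) __arc_shrinker_func(NULL, &sc);
#endif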
/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;
	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */

		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		uint64_t delta;

		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
		mult = MIN(mult, 10);

		delta = MIN(bytes * mult, arc_p);
		arc_p = MAX(arc_p_min, arc_p - delta);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_no_grow)
		return;

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}
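/*
 * Worked example of the ghost-hit adaptation above (illustrative numbers
 * only): on an MRU ghost hit with arc_mru_ghost->arcs_size = 1G and
 * arc_mfu_ghost->arcs_size = 4G, mult = 4G / 1G = 4 (capped at 10), so a
 * 128K block grows arc_p by 512K; the MRU target grows faster when the
 * MFU ghost list dwarfs the MRU ghost list.  Compiled-out restatement
 * (hypothetical helper, not part of the original code):
 */
#if 0
static uint64_t
arc_p_on_mru_ghost_hit_example(uint64_t p, uint64_t p_min, uint64_t c,
    int bytes, uint64_t mru_ghost, uint64_t mfu_ghost)
{
	uint64_t mult = (mru_ghost >= mfu_ghost) ? 1 : (mfu_ghost / mru_ghost);

	mult = MIN(mult, 10);	/* avoid wild arc_p adjustment */
	return (MIN(c - p_min, p + bytes * mult));
}
#endif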
/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
		return (1);

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this area remains
	 * above about 1/32nd free.
	 */
	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
	    vmem_size(zio_arena, VMEM_FREE) <
	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
		return (1);

	if (arc_no_grow)
		return (1);

	return (arc_size > arc_c);
}
/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 *
 * (These four cases are restated compactly in the compiled-out sketch
 * following the function.)
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t		*state = buf->b_hdr->b_state;
	uint64_t		size = buf->b_hdr->b_size;
	arc_buf_contents_t	type = buf->b_hdr->b_type;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed(type)) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			ARCSTAT_INCR(arcstat_data_size, size);
			atomic_add_64(&arc_size, size);
		}
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc_mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
	else if (state == arc_mru_ghost)
		state = arc_mru;

	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		state = (arc_mfu->arcs_lsize[type] >= size &&
		    arc_p > mru_used) ? arc_mfu : arc_mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc_c - arc_p;
		state = (arc_mru->arcs_lsize[type] >= size &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
	}

	if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size, ARC_SPACE_DATA);

			/*
			 * If we are unable to recycle an existing meta buffer
			 * signal the reclaim thread.  It will notify users
			 * via the prune callback to drop references.  The
			 * prune callback is run in the context of the reclaim
			 * thread to avoid deadlocking on the hash_lock.
			 */
			cv_signal(&arc_reclaim_thr_cv);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			ARCSTAT_INCR(arcstat_data_size, size);
			atomic_add_64(&arc_size, size);
		}

		ARCSTAT_BUMP(arcstat_recycle_miss);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->arcs_size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
		}
		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (arc_size < arc_c && hdr->b_state == arc_anon &&
		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
			arc_p = MIN(arc_c, arc_p + size);
	}
}
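/*
 * Compiled-out restatement of the four victimization cases from the
 * comment above arc_get_data_buf() (hypothetical helper, not part of the
 * original code; "insert_for_mru" distinguishes cases 1/2 from 3/4):
 */
#if 0
static arc_state_t *
victim_example(boolean_t insert_for_mru, uint64_t p, uint64_t c,
    uint64_t anon_plus_mru, uint64_t mfu)
{
	if (insert_for_mru)			/* cases 1 and 2 */
		return ((p > anon_plus_mru) ? arc_mfu : arc_mru);
	return (((c - p) > mfu) ? arc_mru : arc_mfu);	/* cases 3 and 4 */
}
#endif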
/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	clock_t now;

	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mru, buf, hash_lock);

	} else if (buf->b_state == arc_mru) {
		now = ddi_get_lbolt();

		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			buf->b_arc_access = now;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache.  Move it to the MFU
		 * state.
		 */
		if (now > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = now;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc_mfu, buf, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (buf->b_state == arc_mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc_mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = ddi_get_lbolt();
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (buf->b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		buf->b_arc_access = ddi_get_lbolt();
	} else if (buf->b_state == arc_mfu_ghost) {
		arc_state_t	*new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
			new_state = arc_mru;
		}

		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (buf->b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		buf->b_arc_access = ddi_get_lbolt();
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mfu, buf, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}
/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0)
		bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
		ASSERT(buf->b_data);
	}
}
static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr, *found;
	arc_buf_t	*buf;
	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
	kmutex_t	*hash_lock;
	arc_callback_t	*callback_list, *acb;
	int		freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
	    (found == hdr && HDR_L2_READING(hdr)));

	hdr->b_flags &= ~ARC_L2_EVICTED;
	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
		hdr->b_flags &= ~ARC_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
		    byteswap_uint64_array :
		    dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
		func(buf->b_data, hdr->b_size);
	}

	arc_cksum_compute(buf, B_FALSE);

	if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		arc_access(hdr, hash_lock);
	}

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf) {
		ASSERT(buf->b_efunc == NULL);
		ASSERT(hdr->b_datacnt == 1);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}
2833 * "Read" the block block at the specified DVA (in bp) via the
2834 * cache. If the block is found in the cache, invoke the provided
2835 * callback immediately and return. Note that the `zio' parameter
2836 * in the callback will be NULL in this case, since no IO was
2837 * required. If the block is not in the cache pass the read request
2838 * on to the spa with a substitute callback function, so that the
2839 * requested block will be added to the cache.
2841 * If a read request arrives for a block that has a read in-progress,
2842 * either wait for the in-progress read to complete (and return the
2843 * results); or, if this is a read with a "done" func, add a record
2844 * to the read to invoke the "done" func when the read completes,
2845 * and return; or just return.
2847 * arc_read_done() will invoke all the requested "done" functions
2848 * for readers of this block.
2850 * Normal callers should use arc_read and pass the arc buffer and offset
2851 * for the bp. But if you know you don't need locking, you can use
2855 arc_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
, arc_buf_t
*pbuf
,
2856 arc_done_func_t
*done
, void *private, int priority
, int zio_flags
,
2857 uint32_t *arc_flags
, const zbookmark_t
*zb
)
2863 * XXX This happens from traverse callback funcs, for
2864 * the objset_phys_t block.
2866 return (arc_read_nolock(pio
, spa
, bp
, done
, private, priority
,
2867 zio_flags
, arc_flags
, zb
));
2870 ASSERT(!refcount_is_zero(&pbuf
->b_hdr
->b_refcnt
));
2871 ASSERT3U((char *)bp
- (char *)pbuf
->b_data
, <, pbuf
->b_hdr
->b_size
);
2872 rw_enter(&pbuf
->b_data_lock
, RW_READER
);
2874 err
= arc_read_nolock(pio
, spa
, bp
, done
, private, priority
,
2875 zio_flags
, arc_flags
, zb
);
2876 rw_exit(&pbuf
->b_data_lock
);
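/*
 * A minimal usage sketch (hypothetical caller; the spa, bp, parent buffer
 * and bookmark come from the caller's context).  ARC_WAIT makes the call
 * synchronous; with ARC_NOWAIT the done func fires from arc_read_done():
 */
#if 0
	arc_buf_t *abuf = NULL;
	uint32_t aflags = ARC_WAIT;

	int err = arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
#endif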
int
arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf = NULL;
	kmutex_t *hash_lock;
	zio_t *rzio;
	uint64_t guid = spa_guid(spa);

top:
	hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
	    &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t	*acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_PUSHPAGE);
				acb->acb_done = done;
				acb->acb_private = private;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, NULL, zio_flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}

		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		if (*arc_flags & ARC_L2CACHE)
			hdr->b_flags |= ARC_L2CACHE;
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t	*acb;
		vdev_t *vd = NULL;
		uint64_t addr = -1;
		boolean_t devw = B_FALSE;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t	*exists;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			buf = arc_buf_alloc(spa, size, private, type);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				buf_discard_identity(hdr);
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;
			arc_get_data_buf(buf);
			arc_access(hdr, hash_lock);
		}

		ASSERT(!GHOST_STATE(hdr->b_state));

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_PUSHPAGE);
		acb->acb_done = done;
		acb->acb_private = private;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
		    (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
			devw = hdr->b_l2hdr->b_dev->l2ad_writing;
			addr = hdr->b_l2hdr->b_daddr;
			/*
			 * Lock out device removal.
			 */
			if (vdev_is_dead(vd) ||
			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
				vd = NULL;
		}

		mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
		    uint64_t, size, zbookmark_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, misses);

		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
			/*
			 * Read from the L2ARC if the following are true:
			 * 1. The L2ARC vdev was previously cached.
			 * 2. This buffer still has L2ARC metadata.
			 * 3. This buffer isn't currently writing to the L2ARC.
			 * 4. The L2ARC entry wasn't evicted, which may
			 *    also have invalidated the vdev.
			 * 5. This isn't prefetch and l2arc_noprefetch is set.
			 */
			if (hdr->b_l2hdr != NULL &&
			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
				l2arc_read_callback_t *cb;

				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_hits);

				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
				    KM_PUSHPAGE);
				cb->l2rcb_buf = buf;
				cb->l2rcb_spa = spa;
				cb->l2rcb_bp = *bp;
				cb->l2rcb_zb = *zb;
				cb->l2rcb_flags = zio_flags;

				/*
				 * l2arc read.  The SCL_L2ARC lock will be
				 * released by l2arc_read_done().
				 */
				rzio = zio_read_phys(pio, vd, addr, size,
				    buf->b_data, ZIO_CHECKSUM_OFF,
				    l2arc_read_done, cb, priority, zio_flags |
				    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
				    ZIO_FLAG_DONT_PROPAGATE |
				    ZIO_FLAG_DONT_RETRY, B_FALSE);
				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
				    zio_t *, rzio);
				ARCSTAT_INCR(arcstat_l2_read_bytes, size);

				if (*arc_flags & ARC_NOWAIT) {
					zio_nowait(rzio);
					return (0);
				}

				ASSERT(*arc_flags & ARC_WAIT);
				if (zio_wait(rzio) == 0)
					return (0);

				/* l2arc read error; goto zio_read() */
			} else {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
				if (HDR_L2_WRITING(hdr))
					ARCSTAT_BUMP(arcstat_l2_rw_clash);
				spa_config_exit(spa, SCL_L2ARC, vd);
			}
		} else {
			if (vd != NULL)
				spa_config_exit(spa, SCL_L2ARC, vd);
			if (l2arc_ndev != 0) {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
			}
		}

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, zio_flags, zb);

		if (*arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
	arc_prune_t *p;

	p = kmem_alloc(sizeof (*p), KM_SLEEP);
	p->p_pfunc = func;
	p->p_private = private;
	list_link_init(&p->p_node);
	refcount_create(&p->p_refcnt);

	mutex_enter(&arc_prune_mtx);
	refcount_add(&p->p_refcnt, &arc_prune_list);
	list_insert_head(&arc_prune_list, p);
	mutex_exit(&arc_prune_mtx);

	return (p);
}

void
arc_remove_prune_callback(arc_prune_t *p)
{
	mutex_enter(&arc_prune_mtx);
	list_remove(&arc_prune_list, p);
	if (refcount_remove(&p->p_refcnt, &arc_prune_list) == 0) {
		refcount_destroy(&p->p_refcnt);
		kmem_free(p, sizeof (*p));
	}
	mutex_exit(&arc_prune_mtx);
}
void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	ASSERT(buf->b_efunc == NULL);
	ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));

	buf->b_efunc = func;
	buf->b_private = private;
}
/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		mutex_exit(&buf->b_evict_lock);
		return (0);
	} else if (buf->b_data == NULL) {
		arc_buf_t copy = *buf; /* structure assignment */
		/*
		 * We are on the eviction list; process this buffer now
		 * but let arc_do_user_evicts() do the reaping.
		 */
		buf->b_efunc = NULL;
		mutex_exit(&buf->b_evict_lock);
		VERIFY(copy.b_efunc(&copy) == 0);
		return (1);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(hdr->b_buf == NULL);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

		mutex_enter(&old_state->arcs_mtx);
		mutex_enter(&evicted_state->arcs_mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags |= ARC_IN_HASH_TABLE;
		hdr->b_flags &= ~ARC_BUF_AVAILABLE;

		mutex_exit(&evicted_state->arcs_mtx);
		mutex_exit(&old_state->arcs_mtx);
	}
	mutex_exit(hash_lock);
	mutex_exit(&buf->b_evict_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	buf->b_next = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}
/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock = NULL;
	l2arc_buf_hdr_t *l2hdr;
	uint64_t buf_size = 0;

	/*
	 * It would be nice to assert that if it's DMU metadata (level >
	 * 0 || it's the dnode file), then it must be syncing context.
	 * But we don't know that information at this level.
	 */

	mutex_enter(&buf->b_evict_lock);
	hdr = buf->b_hdr;

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT(buf->b_efunc == NULL);
	} else {
		hash_lock = HDR_LOCK(hdr);
		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	}

	l2hdr = hdr->b_l2hdr;
	if (l2hdr) {
		mutex_enter(&l2arc_buflist_mtx);
		hdr->b_l2hdr = NULL;
		buf_size = hdr->b_size;
	}

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_datacnt > 1) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		uint64_t spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
		/*
		 * Pull the data off of this hdr and attach it to
		 * a new anonymous hdr.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = buf->b_next;
		buf->b_next = NULL;

		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
			ASSERT3U(*size, >=, hdr->b_size);
			atomic_add_64(size, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;
		arc_cksum_verify(buf);

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_type = type;
		nhdr->b_buf = buf;
		nhdr->b_state = arc_anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = flags & ARC_L2_WRITING;
		nhdr->b_l2hdr = NULL;
		nhdr->b_datacnt = 1;
		nhdr->b_freeze_cksum = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		buf->b_hdr = nhdr;
		mutex_exit(&buf->b_evict_lock);
		atomic_add_64(&arc_anon->arcs_size, blksz);
	} else {
		mutex_exit(&buf->b_evict_lock);
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		if (hash_lock)
			mutex_exit(hash_lock);

		buf_discard_identity(hdr);
		arc_buf_thaw(buf);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (l2hdr) {
		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
		mutex_exit(&l2arc_buflist_mtx);
	}
}
/*
 * Release this buffer.  If it does not match the provided BP, fill it
 * with that block's contents.
 */
/* ARGSUSED */
int
arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
    zbookmark_t *zb)
{
	arc_release(buf, tag);
	return (0);
}

int
arc_released(arc_buf_t *buf)
{
	int released;

	mutex_enter(&buf->b_evict_lock);
	released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
	mutex_exit(&buf->b_evict_lock);
	return (released);
}

int
arc_has_callback(arc_buf_t *buf)
{
	int callback;

	mutex_enter(&buf->b_evict_lock);
	callback = (buf->b_efunc != NULL);
	mutex_exit(&buf->b_evict_lock);
	return (callback);
}

int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	mutex_enter(&buf->b_evict_lock);
	referenced = (refcount_count(&buf->b_hdr->b_refcnt));
	mutex_exit(&buf->b_evict_lock);
	return (referenced);
}
static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
	callback->awcb_ready(zio, buf, callback->awcb_private);

	/*
	 * If the IO is already in progress, then this is a re-write
	 * attempt, so we need to thaw and re-compute the cksum.
	 * It is the responsibility of the callback to handle the
	 * accounting for any re-write attempt.
	 */
	if (HDR_IO_IN_PROGRESS(hdr)) {
		mutex_enter(&hdr->b_freeze_lock);
		if (hdr->b_freeze_cksum != NULL) {
			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
			hdr->b_freeze_cksum = NULL;
		}
		mutex_exit(&hdr->b_freeze_lock);
	}
	arc_cksum_compute(buf, B_FALSE);
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
}
static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(hdr->b_acb == NULL);

	if (zio->io_error == 0) {
		hdr->b_dva = *BP_IDENTITY(zio->io_bp);
		hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
		hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	} else {
		ASSERT(BUF_EMPTY(hdr));
	}

	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth/checksum.  The buffer must
	 * therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		ASSERT(zio->io_error == 0);

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad overwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
				ASSERT(refcount_is_zero(&exists->b_refcnt));
				arc_change_state(arc_anon, exists, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(exists);
				exists = buf_hash_insert(hdr, &hash_lock);
				ASSERT3P(exists, ==, NULL);
			} else {
				/* Dedup */
				ASSERT(hdr->b_datacnt == 1);
				ASSERT(hdr->b_state == arc_anon);
				ASSERT(BP_GET_DEDUP(zio->io_bp));
				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
			}
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		/* if it's not anon, we are doing a scrub */
		if (!exists && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}

	ASSERT(!refcount_is_zero(&hdr->b_refcnt));
	callback->awcb_done(zio, buf, callback->awcb_private);

	kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
    arc_done_func_t *ready, arc_done_func_t *done, void *private,
    int priority, int zio_flags, const zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;

	ASSERT(ready != NULL);
	ASSERT(done != NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
	ASSERT(hdr->b_acb == NULL);
	if (l2arc)
		hdr->b_flags |= ARC_L2CACHE;
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
	    arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);

	return (zio);
}
static int
arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t available_memory;

	/* Easily reclaimable memory (free + inactive + arc-evictable) */
	available_memory = ptob(spl_kmem_availrmem()) + arc_evictable_memory();

	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));

	if (available_memory <= zfs_write_limit_max) {
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
		return (EAGAIN);
	}

	if (inflight_data > available_memory / 4) {
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_inflight);
		return (ERESTART);
	}
#endif	/* _KERNEL */
	return (0);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason.  Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c) {
		DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
		return (ENOMEM);
	}

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */
	anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	if ((error = arc_memory_throttle(reserve, anon_size, txg)))
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */

	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
		    reserve>>10, arc_c>>10);
		DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
		return (ERESTART);
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
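/*
 * Worked example of the dirty-data throttle above (illustrative numbers
 * only): with arc_c = 8G, the write is delayed once reserve +
 * arc_tempreserve + anon_size exceeds 4G (half of arc_c) while anon_size
 * alone exceeds 2G (a quarter of arc_c); e.g. reserve = 1G,
 * arc_tempreserve = 1G and anon_size = 2.5G trips both conditions.
 * Compiled-out restatement (hypothetical helper, not part of the
 * original code):
 */
#if 0
static boolean_t
would_throttle_example(uint64_t reserve, uint64_t tempreserve,
    uint64_t anon_size, uint64_t c)
{
	return (reserve + tempreserve + anon_size > c / 2 &&
	    anon_size > c / 4);
}
#endif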
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	size->value.ui64 = state->arcs_size;
	evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA];
	evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA];
}

static int
arc_kstat_update(kstat_t *ksp, int rw)
{
	arc_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	} else {
		arc_kstat_update_state(arc_anon,
		    &as->arcstat_anon_size,
		    &as->arcstat_anon_evict_data,
		    &as->arcstat_anon_evict_metadata);
		arc_kstat_update_state(arc_mru,
		    &as->arcstat_mru_size,
		    &as->arcstat_mru_evict_data,
		    &as->arcstat_mru_evict_metadata);
		arc_kstat_update_state(arc_mru_ghost,
		    &as->arcstat_mru_ghost_size,
		    &as->arcstat_mru_ghost_evict_data,
		    &as->arcstat_mru_ghost_evict_metadata);
		arc_kstat_update_state(arc_mfu,
		    &as->arcstat_mfu_size,
		    &as->arcstat_mfu_evict_data,
		    &as->arcstat_mfu_evict_metadata);
		arc_kstat_update_state(arc_mfu_ghost,
		    &as->arcstat_mfu_ghost_size,
		    &as->arcstat_mfu_ghost_evict_data,
		    &as->arcstat_mfu_ghost_evict_metadata);
	}

	return (0);
}
void
arc_init(void)
{
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = physmem * PAGESIZE / 8;

#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);

	/*
	 * Register a shrinker to support synchronous (direct) memory
	 * reclaim from the arc.  This is done to prevent kswapd from
	 * swapping out pages when it is preferable to shrink the arc.
	 */
	spl_register_shrinker(&arc_shrinker);
#endif

	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
	arc_c_min = MAX(arc_c / 4, 64<<20);
	/* set max to 1/2 of all memory */
	arc_c_max = MAX(arc_c * 4, arc_c_max);

	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (ie. over 64MB)
	 */
	if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
		arc_c_max = zfs_arc_max;
	if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	if (zfs_arc_meta_prune > 0)
		arc_meta_prune = zfs_arc_meta_prune;

	/* if kmem_flags are set, lets try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;
	arc_size = 0;

	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	list_create(&arc_prune_list, sizeof (arc_prune_t),
	    offsetof(arc_prune_t, p_node));
	arc_eviction_list = NULL;
	mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_adapt_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_dead = FALSE;
	arc_warm = B_FALSE;

	if (zfs_write_limit_max == 0)
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
	else
		zfs_write_limit_shift = 0;
	mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
}
void
arc_fini(void)
{
	arc_prune_t *p;

	mutex_enter(&arc_reclaim_thr_lock);
#ifdef _KERNEL
	spl_unregister_shrinker(&arc_shrinker);
#endif /* _KERNEL */

	arc_thread_exit = 1;
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush(NULL);

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_enter(&arc_prune_mtx);
	while ((p = list_head(&arc_prune_list)) != NULL) {
		list_remove(&arc_prune_list, p);
		refcount_remove(&p->p_refcnt, &arc_prune_list);
		refcount_destroy(&p->p_refcnt);
		kmem_free(p, sizeof (*p));
	}
	mutex_exit(&arc_prune_mtx);

	list_destroy(&arc_prune_list);
	mutex_destroy(&arc_prune_mtx);
	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);

	mutex_destroy(&arc_anon->arcs_mtx);
	mutex_destroy(&arc_mru->arcs_mtx);
	mutex_destroy(&arc_mru_ghost->arcs_mtx);
	mutex_destroy(&arc_mfu->arcs_mtx);
	mutex_destroy(&arc_mfu_ghost->arcs_mtx);
	mutex_destroy(&arc_l2c_only->arcs_mtx);

	mutex_destroy(&zfs_write_limit_lock);

	buf_fini();

	ASSERT(arc_loaned_bytes == 0);
}
/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *    1) ARC
 *    2) vdev cache of L2ARC devices
 *    3) L2ARC devices
 *    4) vdev cache of disks
 *    5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this, there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *            head -->                        tail
 *             +---------------------+----------+
 *     ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *             +---------------------+----------+   |   o L2ARC eligible
 *     ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   :   ARC buffer
 *             +---------------------+----------+   |
 *                  15.9 Gbytes      ^ 32 Mbytes    |
 *                                headroom          |
 *                                           l2arc_feed_thread()
 *                                                  |
 *                      l2arc write hand <--[oooo]--'
 *                              |           8 Mbyte
 *                              V         write max
 *               +==============================+
 *     L2ARC dev |####|#|###|###|    |####| ... |
 *               +==============================+
 *                          32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the list
 * heads for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *    l2arc_write_max        max write bytes per interval
 *    l2arc_write_boost      extra write bytes during device warmup
 *    l2arc_noprefetch       skip caching prefetched buffers
 *    l2arc_headroom         number of max device writes to precache
 *    l2arc_feed_secs        seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *    l2arc_write_eligible()    check if a buffer is eligible to cache
 *    l2arc_write_size()        calculate how much to write
 *    l2arc_write_interval()    calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to write.
 */
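/*
 * Illustrative sketch only, not part of the original implementation: how a
 * single feed cycle composes the three functions above.  The hypothetical
 * name l2arc_feed_cycle_sketch is ours; the real logic, with locking,
 * device rotation, and read-only checks, lives in l2arc_feed_thread()
 * below.  The sketch is compiled out.
 */
#if 0
static clock_t
l2arc_feed_cycle_sketch(spa_t *spa, l2arc_dev_t *dev, clock_t began)
{
	uint64_t size, wrote;

	size = l2arc_write_size(dev);		/* how much to write */
	l2arc_evict(dev, size, B_FALSE);	/* clear space ahead of the hand */
	wrote = l2arc_write_buffers(spa, dev, size);	/* what to write */

	/* how quickly: when the next feed cycle should run */
	return (l2arc_write_interval(began, size, wrote));
}
#endif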
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
	    HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
		return (B_FALSE);

	return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
	uint64_t size;

	size = dev->l2ad_write;

	if (arc_warm == B_FALSE)
		size += dev->l2ad_boost;

	return (size);
}
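/*
 * Added note (not in the original file): with the default module parameters
 * l2arc_write_max = 8 MB and l2arc_write_boost = 8 MB, this allows up to
 * 16 MB per feed cycle while arc_warm is still B_FALSE, dropping back to
 * 8 MB once the ARC has started evicting.
 */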
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
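/*
 * Worked example (illustrative, assuming hz = 100 and the default tunables
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200): if the last cycle wrote
 * more than half of what was wanted and l2arc_feed_again is set, the next
 * feed lands 20 ticks (200 ms) after 'began'; otherwise 100 ticks (1 s).
 * The MAX(now, ...) clamp keeps the result from pointing into the past when
 * the write itself took longer than the interval.
 */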
static void
l2arc_hdr_stat_add(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}
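/*
 * Added note: each pair of calls above moves HDR_SIZE bytes of accounting
 * between arcstat_hdr_size and arcstat_l2_hdr_size, so the net kstat change
 * for a header gaining (or losing) an L2ARC presence is exactly +/-
 * L2HDR_SIZE bytes.
 */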
/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			goto out;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}
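/*
 * Added note: on a non-NULL return the caller holds the selected device's
 * spa config lock (SCL_L2ARC) and is responsible for dropping it with
 * spa_config_exit(), as l2arc_feed_thread() does at the end of each cycle.
 */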
/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *ab, *ab_prev;
	l2arc_buf_hdr_t *abl2;
	kmutex_t *hash_lock;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&l2arc_buflist_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, ab);
			abl2 = ab->b_l2hdr;
			ab->b_l2hdr = NULL;
			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		ab->b_flags &= ~ARC_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	kmem_cache_free(hdr_cache, head);
	mutex_exit(&l2arc_buflist_mtx);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}
/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * Check this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = EIO;
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list = NULL;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);

	return (list);
}
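/*
 * Added note: the resulting scan order is
 *    0: MFU metadata    1: MRU metadata    2: MFU data    3: MRU data
 * so metadata is always offered to the L2ARC before file data.
 */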
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	l2arc_buf_hdr_t *abl2;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = dev->l2ad_buflist;

	if (buflist == NULL)
		return;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}

	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&l2arc_buflist_mtx);
	for (ab = list_tail(buflist); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&l2arc_buflist_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(ab)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, ab);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && ab->b_l2hdr != NULL &&
		    (ab->b_l2hdr->b_daddr > taddr ||
		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (HDR_FREE_IN_PROGRESS(ab)) {
			/*
			 * Already on the path to destruction.
			 */
			mutex_exit(hash_lock);
			continue;
		}

		if (ab->b_state == arc_l2c_only) {
			ASSERT(!HDR_L2_READING(ab));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, ab, hash_lock);
			arc_hdr_destroy(ab);
		} else {
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(ab)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				ab->b_flags |= ARC_L2_EVICTED;
			}

			/*
			 * Tell ARC this no longer exists in L2ARC.
			 */
			if (ab->b_l2hdr != NULL) {
				abl2 = ab->b_l2hdr;
				ab->b_l2hdr = NULL;
				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
			}
			list_remove(buflist, ab);

			/*
			 * This may have been leftover after a
			 * failed write.
			 */
			ab->b_flags &= ~ARC_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&l2arc_buflist_mtx);

	vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
	dev->l2ad_evict = taddr;
}
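/*
 * Illustrative example (sizes invented): with l2ad_end at 32 GB and a
 * distance of 16 MB, a write hand within 32 MB (2 * distance) of the end
 * evicts all the way to l2ad_end, so the hand can wrap cleanly back to
 * l2ad_start; otherwise only [l2ad_hand, l2ad_hand + 16 MB) is cleared.
 */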
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock = NULL;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_guid(spa);
	int try;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t),
				    KM_PUSHPAGE);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t),
			    KM_PUSHPAGE);
			hdrl2->b_dev = dev;
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return (0);
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		vdev_space_update(dev->l2ad_vdev,
		    dev->l2ad_end - dev->l2ad_hand, 0, 0);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_sz);
}
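/*
 * Added note: the byte count returned above is fed to
 * l2arc_write_interval(), so a cycle that wrote more than half of its
 * target reschedules the next feed at the faster l2arc_feed_min_ms rate
 * when l2arc_feed_again is enabled.
 */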
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_interruptible(&l2arc_feed_thr_cv,
		    &l2arc_feed_thr_lock, next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.   This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_no_grow) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size(dev);

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;
	list_link_init(&adddev->l2ad_node);
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_remove_ref);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);

module_param(zfs_arc_min, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_min, "Min arc size");

module_param(zfs_arc_max, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_max, "Max arc size");

module_param(zfs_arc_meta_limit, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");

module_param(zfs_arc_meta_prune, int, 0444);
MODULE_PARM_DESC(zfs_arc_meta_prune, "Bytes of meta data to prune");

module_param(zfs_arc_grow_retry, int, 0444);
MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");

module_param(zfs_arc_shrink_shift, int, 0444);
MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");

module_param(zfs_arc_p_min_shift, int, 0444);
MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");

module_param(l2arc_write_max, ulong, 0444);
MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");

module_param(l2arc_write_boost, ulong, 0444);
MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup");

module_param(l2arc_headroom, ulong, 0444);
MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes to precache");

module_param(l2arc_feed_secs, ulong, 0444);
MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing");

module_param(l2arc_feed_min_ms, ulong, 0444);
MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds");

module_param(l2arc_noprefetch, int, 0444);
MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers");

module_param(l2arc_feed_again, int, 0444);
MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup");

module_param(l2arc_norw, int, 0444);
MODULE_PARM_DESC(l2arc_norw, "No reads during writes");
#endif