/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block. (A worked example follows this
 * comment.)
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
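/*
 * Worked example (illustrative numbers only, not additional mechanism):
 * on a miss for a 128K block when the cache is full, the evictable tail
 * of a list might hold one 128K buffer and a mix of 16K and 64K
 * buffers. Evicting the single 128K buffer matches the requested space
 * exactly; failing that, two 64K buffers (or one 64K buffer plus four
 * 16K buffers) approximate it. The recycle path in arc_evict() goes
 * further and hands an evicted block's memory directly to the new
 * buffer when the sizes match exactly.
 */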
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 * (A sketch of this pattern follows this comment block.)
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
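/*
 * Illustrative sketch of the list-lock -> hash-lock ordering rule
 * described above (not a complete function; arc_evict() below is the
 * real implementation):
 *
 *	mutex_enter(&state->arcs_mtx);		   arc list lock first
 *	for (ab = list_tail(list); ab; ab = ab_prev) {
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;		   skip it; blocking here
 *						   while holding arcs_mtx
 *						   could deadlock
 *		... evict or move the buffer ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */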
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif /* _KERNEL */
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>
static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;
/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

/* set once arc_fini() has run; checked by hdr_recl() */
static int		arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;
/*
 * These tunables are for performance analysis.
 */
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_reduce_dnlc_percent = 0;
/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
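/*
 * Illustrative lifecycle (a sketch of the states above, not additional
 * mechanism): a block read in from disk enters ARC_mru; a second hit
 * while cached promotes it to ARC_mfu; when its data is evicted, the
 * header moves to the matching ghost state (ARC_mru_ghost or
 * ARC_mfu_ghost) so that a later miss on the same DVA can steer the
 * adaptation of arc_p; a header whose data survives only on an L2ARC
 * device sits in ARC_l2c_only.
 */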
typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_evict_l2_cached;
	kstat_named_t	arcstat_evict_l2_eligible;
	kstat_named_t	arcstat_evict_l2_ineligible;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
	kstat_named_t	arcstat_hdr_size;
	kstat_named_t	arcstat_data_size;
	kstat_named_t	arcstat_other_size;
	kstat_named_t	arcstat_l2_hits;
	kstat_named_t	arcstat_l2_misses;
	kstat_named_t	arcstat_l2_feeds;
	kstat_named_t	arcstat_l2_rw_clash;
	kstat_named_t	arcstat_l2_read_bytes;
	kstat_named_t	arcstat_l2_write_bytes;
	kstat_named_t	arcstat_l2_writes_sent;
	kstat_named_t	arcstat_l2_writes_done;
	kstat_named_t	arcstat_l2_writes_error;
	kstat_named_t	arcstat_l2_writes_hdr_miss;
	kstat_named_t	arcstat_l2_evict_lock_retry;
	kstat_named_t	arcstat_l2_evict_reading;
	kstat_named_t	arcstat_l2_free_on_write;
	kstat_named_t	arcstat_l2_abort_lowmem;
	kstat_named_t	arcstat_l2_cksum_bad;
	kstat_named_t	arcstat_l2_io_error;
	kstat_named_t	arcstat_l2_size;
	kstat_named_t	arcstat_l2_hdr_size;
	kstat_named_t	arcstat_memory_throttle_count;
	kstat_named_t	arcstat_memory_direct_count;
	kstat_named_t	arcstat_memory_indirect_count;
	kstat_named_t	arcstat_no_grow;
	kstat_named_t	arcstat_tempreserve;
	kstat_named_t	arcstat_loaned_bytes;
	kstat_named_t	arcstat_meta_used;
	kstat_named_t	arcstat_meta_limit;
	kstat_named_t	arcstat_meta_max;
} arc_stats_t;
static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "memory_direct_count",	KSTAT_DATA_UINT64 },
	{ "memory_indirect_count",	KSTAT_DATA_UINT64 },
	{ "arc_no_grow",		KSTAT_DATA_UINT64 },
	{ "arc_tempreserve",		KSTAT_DATA_UINT64 },
	{ "arc_loaned_bytes",		KSTAT_DATA_UINT64 },
	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
};
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
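/*
 * Illustrative use of ARCSTAT_CONDSTAT (this exact call appears in
 * arc_buf_add_ref() below):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * It bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits, depending on the two conditions.
 */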
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;
/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_no_grow	ARCSTAT(arcstat_no_grow)
#define	arc_tempreserve	ARCSTAT(arcstat_tempreserve)
#define	arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
#define	arc_meta_used	ARCSTAT(arcstat_meta_used)
#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit)
#define	arc_meta_max	ARCSTAT(arcstat_meta_max)
typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};
struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};
static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
/*
 * Private ARC flags. These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
/*
 * Hash table routines
 */

#define	HT_LOCK_ALIGN	64
#define	HT_LOCK_PAD	(P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[HT_LOCK_PAD];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];
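/*
 * Illustrative note (a sketch of how the macros above compose): given a
 * buffer identity (spa, dva, birth),
 *
 *	idx  = buf_hash(spa, dva, birth) & buf_hash_table.ht_mask;
 *	lock = &buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)].ht_lock;
 *
 * so each of the 256 padded locks protects a slice of the hash chains,
 * and HDR_LOCK(hdr) recomputes the same lock from the identity stored
 * in the header itself.
 */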
/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE;	/* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM;		/* # of dev writes */
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
int l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE;		/* turbo warmup */
int l2arc_norw = B_TRUE;		/* no reads during writes */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;
struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}
#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel.
	 */
	vmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}
/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&buf->b_arc_node);
	list_link_init(&buf->b_l2node);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}
/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}
/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	rw_destroy(&buf->b_data_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}
static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size. The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
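	/*
	 * Worked example (illustrative numbers only): with 4 GiB of
	 * physical memory, the loop above stops once
	 * hsize * 64 KiB >= 4 GiB, i.e. hsize = 65536 buckets; with
	 * 8-byte pointers that is a 512 KiB table, matching the
	 * 128KB/GB estimate in the comment.
	 */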
retry:
	buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel.
	 */
	buf_hash_table.ht_table =
	    vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */
static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}
static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);
}
static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}
static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}
/*
 * Move the supplied buffer to the indicated state. The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}
void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}
arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}
1299 arc_return_buf(arc_buf_t
*buf
, void *tag
)
1301 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1303 ASSERT(buf
->b_data
!= NULL
);
1304 (void) refcount_add(&hdr
->b_refcnt
, tag
);
1305 (void) refcount_remove(&hdr
->b_refcnt
, arc_onloan_tag
);
1307 atomic_add_64(&arc_loaned_bytes
, -hdr
->b_size
);
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}
static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}
void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted. Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}
/*
 * Free the arc data buffer. If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}
static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority. l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
		if (!buflist_held) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
		}

		if (l2hdr != NULL) {
			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
			if (hdr->b_state == arc_l2c_only)
				l2arc_hdr_stat_remove();
			hdr->b_l2hdr = NULL;
		}

		if (!buflist_held)
			mutex_exit(&l2arc_buflist_mtx);
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		buf_discard_identity(hdr);
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			mutex_enter(&buf->b_evict_lock);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&buf->b_evict_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	if (hdr->b_thawed) {
		kmem_free(hdr->b_thawed, 1);
		hdr->b_thawed = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}
void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write. Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			arc_hdr_destroy(hdr);
	}
}
int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		ASSERT(hdr->b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}
int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}
/*
 * Evict buffers from list until we've removed the specified number of
 * bytes. Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
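/*
 * Illustrative call (a sketch; the recycle path is exercised when
 * allocating new data buffers): to recycle a 16K data block from the
 * MRU list one would use
 *
 *	stolen = arc_evict(arc_mru, 0, 16384, TRUE, ARC_BUFC_DATA);
 *
 * where spa == 0 means "any pool"; on success the evicted buffer's
 * data block is returned instead of being freed.
 */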
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    ddi_get_lbolt() - ab->b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!mutex_tryenter(&buf->b_evict_lock)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					mutex_exit(&buf->b_evict_lock);
				} else {
					mutex_exit(&buf->b_evict_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x\n",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, 0, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, 0, todelete);
		}
	}

	return (stolen);
}
/*
 * Remove buffers from list until we've removed the specified number of
 * bytes. Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	arc_buf_hdr_t marker;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
	bzero(&marker, sizeof (marker));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;

		/* ignore markers */
		if (ab->b_spa == 0)
			continue;

		hash_lock = HDR_LOCK(ab);
		/* caller may be trying to modify this buffer, skip it */
		if (MUTEX_HELD(hash_lock))
			continue;
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else if (bytes < 0) {
			/*
			 * Insert a list marker and then wait for the
			 * hash lock to become available. Once it's
			 * available, restart from where we left off.
			 */
			list_insert_after(list, ab, &marker);
			mutex_exit(&state->arcs_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			mutex_enter(&state->arcs_mtx);
			ab_prev = list_prev(list, &marker);
			list_remove(list, &marker);
		} else {
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p\n",
		    (longlong_t)bytes_deleted, state);
}
static void
arc_adjust(void)
{
	int64_t adjustment, delta;

	/*
	 * Adjust MRU size
	 */

	adjustment = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
	    arc_p));

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
		(void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
		(void) arc_evict(arc_mru, 0, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 */

	adjustment = arc_size - arc_c;

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
		(void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
		adjustment -= delta;
	}

	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t delta = MIN(adjustment,
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
		(void) arc_evict(arc_mfu, 0, delta, FALSE,
		    ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 */

	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mru_ghost, 0, delta);
	}

	adjustment =
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;

	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
		arc_evict_ghost(arc_mfu_ghost, 0, delta);
	}
}
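/*
 * Worked example for the MRU pass in arc_adjust() above (illustrative
 * numbers only): with arc_c = 1 GiB, arc_p = 512 MiB, arc_size =
 * 1.2 GiB and anon + mru + meta = 700 MiB, the first adjustment is
 * MIN(1.2 GiB - 1 GiB, 700 MiB - 512 MiB) = 188 MiB, evicted from MRU
 * data first and MRU metadata second; the MFU pass then works on
 * whatever of the arc_size - arc_c overage remains.
 */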
static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = NULL;
		mutex_exit(&buf->b_evict_lock);
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}
/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	uint64_t guid = 0;

	if (spa)
		guid = spa_guid(spa);

	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, guid, -1);
	arc_evict_ghost(arc_mfu_ghost, guid, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}
1996 if (arc_c
> arc_c_min
) {
2000 to_free
= MAX(arc_c
>> arc_shrink_shift
, ptob(needfree
));
2002 to_free
= arc_c
>> arc_shrink_shift
;
2004 if (arc_c
> arc_c_min
+ to_free
)
2005 atomic_add_64(&arc_c
, -to_free
);
2009 atomic_add_64(&arc_p
, -(arc_p
>> arc_shrink_shift
));
2010 if (arc_c
> arc_size
)
2011 arc_c
= MAX(arc_size
, arc_c_min
);
2013 arc_p
= (arc_c
>> 1);
2014 ASSERT(arc_c
>= arc_c_min
);
2015 ASSERT((int64_t)arc_p
>= 0);
2018 if (arc_size
> arc_c
)
2023 arc_reclaim_needed(void)
2032 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2037 * check that we're out of range of the pageout scanner. It starts to
2038 * schedule paging if freemem is less than lotsfree and needfree.
2039 * lotsfree is the high-water mark for pageout, and needfree is the
2040 * number of needed free pages. We add extra pages here to make sure
2041 * the scanner doesn't start up while we're freeing memory.
2043 if (freemem
< lotsfree
+ needfree
+ extra
)
2047 * check to make sure that swapfs has enough space so that anon
2048 * reservations can still succeed. anon_resvmem() checks that the
2049 * availrmem is greater than swapfs_minfree, and the number of reserved
2050 * swap pages. We also add a bit of extra here just to prevent
2051 * circumstances from getting really dire.
2053 if (availrmem
< swapfs_minfree
+ swapfs_reserve
+ extra
)
2058 * If we're on an i386 platform, it's possible that we'll exhaust the
2059 * kernel heap space before we ever run out of available physical
2060 * memory. Most checks of the size of the heap_area compare against
2061 * tune.t_minarmem, which is the minimum available real memory that we
2062 * can have in the system. However, this is generally fixed at 25 pages
2063 * which is so low that it's useless. In this comparison, we seek to
2064 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2065 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2068 if (btop(vmem_size(heap_arena
, VMEM_FREE
)) <
2069 (btop(vmem_size(heap_arena
, VMEM_FREE
| VMEM_ALLOC
)) >> 2))
2074 if (spa_get_random(100) == 0)
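/*
 * Worked example of the heap check above: with a 1 Gbyte kernel heap,
 * reclaim is requested once free heap drops below 256 Mbytes, since
 * btop(free) < btop(free + alloc) >> 2 is just "less than 1/4th of the
 * heap remains free".
 */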
2081 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2084     kmem_cache_t *prev_cache = NULL;
2085     kmem_cache_t *prev_data_cache = NULL;
2086     extern kmem_cache_t *zio_buf_cache[];
2087     extern kmem_cache_t *zio_data_buf_cache[];
2091     while ((arc_meta_used >= arc_meta_limit) && (retry < 10)) {
2093          * We are exceeding our meta-data cache limit.
2094          * Purge some DNLC entries to release holds on meta-data.
2096         dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2101      * Reclaim unused memory from all kmem caches.
2108      * An aggressive reclamation will shrink the cache size as well as
2109      * reap free buffers from the arc kmem caches.
2111     if (strat == ARC_RECLAIM_AGGR)
2114     for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2115         if (zio_buf_cache[i] != prev_cache) {
2116             prev_cache = zio_buf_cache[i];
2117             kmem_cache_reap_now(zio_buf_cache[i]);
2119         if (zio_data_buf_cache[i] != prev_data_cache) {
2120             prev_data_cache = zio_data_buf_cache[i];
2121             kmem_cache_reap_now(zio_data_buf_cache[i]);
2124     kmem_cache_reap_now(buf_cache);
2125     kmem_cache_reap_now(hdr_cache);
2129 arc_reclaim_thread(void)
2131     clock_t growtime = 0;
2132     arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2135     CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2137     mutex_enter(&arc_reclaim_thr_lock);
2138     while (arc_thread_exit == 0) {
2139         if (arc_reclaim_needed()) {
2142             if (last_reclaim == ARC_RECLAIM_CONS) {
2143                 last_reclaim = ARC_RECLAIM_AGGR;
2145                 last_reclaim = ARC_RECLAIM_CONS;
2149             last_reclaim = ARC_RECLAIM_AGGR;
2153             /* reset the growth delay for every reclaim */
2154             growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2156             arc_kmem_reap_now(last_reclaim);
2159         } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2160             arc_no_grow = FALSE;
2163         /* Keep meta data usage within limits */
2164         if (arc_meta_used >= arc_meta_limit)
2165             arc_kmem_reap_now(ARC_RECLAIM_CONS);
2169         if (arc_eviction_list != NULL)
2170             arc_do_user_evicts();
2172         /* block until needed, or one second, whichever is shorter */
2173         CALLB_CPR_SAFE_BEGIN(&cpr);
2174         (void) cv_timedwait_interruptible(&arc_reclaim_thr_cv,
2175             &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2176         CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2179     arc_thread_exit = 0;
2180     cv_broadcast(&arc_reclaim_thr_cv);
2181     CALLB_CPR_EXIT(&cpr);    /* drops arc_reclaim_thr_lock */
2187  * Under Linux the arc shrinker may be called for synchronous (direct)
2188  * reclaim, or asynchronous (indirect) reclaim.  When called by kswapd
2189  * for indirect reclaim we take a conservative approach and just reap
2190  * free slabs from the ARC caches.  If this proves to be insufficient,
2191  * direct reclaim will be triggered.  In direct reclaim a more aggressive
2192  * strategy is used: data is evicted from the ARC and free slabs reaped.
2195 __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
2197     arc_reclaim_strategy_t strategy;
2200     /* Return number of reclaimable pages based on arc_shrink_shift */
2201     arc_reclaim = MAX(btop(((int64_t)arc_size - (int64_t)arc_c_min))
2202         >> arc_shrink_shift, 0);
2203     if (sc->nr_to_scan == 0)
2204         return (arc_reclaim);
2206     /* Prevent reclaim below arc_c_min */
2207     if (arc_reclaim <= 0)
2210     /* Not allowed to perform filesystem reclaim */
2211     if (!(sc->gfp_mask & __GFP_FS))
2214     /* Reclaim in progress */
2215     if (mutex_tryenter(&arc_reclaim_thr_lock) == 0)
2218     if (current_is_kswapd()) {
2219         strategy = ARC_RECLAIM_CONS;
2220         ARCSTAT_INCR(arcstat_memory_indirect_count, 1);
2222         strategy = ARC_RECLAIM_AGGR;
2223         ARCSTAT_INCR(arcstat_memory_direct_count, 1);
2226     arc_kmem_reap_now(strategy);
2227     arc_reclaim = MAX(btop(((int64_t)arc_size - (int64_t)arc_c_min))
2228         >> arc_shrink_shift, 0);
2229     mutex_exit(&arc_reclaim_thr_lock);
2231     return (arc_reclaim);
2233 SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
2235 SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
2236 #endif /* _KERNEL */
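/*
 * Example of the shrinker arithmetic above, assuming the default
 * arc_shrink_shift of 5 and 4K pages: with arc_size = 4G and
 * arc_c_min = 1G, the nr_to_scan == 0 probe reports
 *     btop(4G - 1G) >> 5 = 786432 >> 5 = 24576 pages (~96M),
 * i.e. the shrinker offers the kernel roughly 1/32nd of the amount by
 * which the ARC currently exceeds its floor.
 */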
2239  * Adapt arc info given the number of bytes we are trying to add and
2240  * the state that we are coming from.  This function is only called
2241  * when we are adding new content to the cache.
2244 arc_adapt(int bytes, arc_state_t *state)
2247     uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2249     if (state == arc_l2c_only)
2254      * Adapt the target size of the MRU list:
2255      *	- if we just hit in the MRU ghost list, then increase
2256      *	  the target size of the MRU list.
2257      *	- if we just hit in the MFU ghost list, then increase
2258      *	  the target size of the MFU list by decreasing the
2259      *	  target size of the MRU list.
2261     if (state == arc_mru_ghost) {
2262         mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2263             1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2264         mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2266         arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2267     } else if (state == arc_mfu_ghost) {
2270         mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2271             1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2272         mult = MIN(mult, 10);
2274         delta = MIN(bytes * mult, arc_p);
2275         arc_p = MAX(arc_p_min, arc_p - delta);
2277     ASSERT((int64_t)arc_p >= 0);
2279     if (arc_reclaim_needed()) {
2280         cv_signal(&arc_reclaim_thr_cv);
2287     if (arc_c >= arc_c_max)
2291      * If we're within (2 * maxblocksize) bytes of the target
2292      * cache size, increment the target cache size
2294     if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2295         atomic_add_64(&arc_c, (int64_t)bytes);
2296         if (arc_c > arc_c_max)
2298         else if (state == arc_anon)
2299             atomic_add_64(&arc_p, (int64_t)bytes);
2303     ASSERT((int64_t)arc_p >= 0);
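/*
 * Worked example of the ghost-hit adaptation above: on an MRU ghost hit
 * with mfu_ghost four times the size of mru_ghost, mult = 4, so a 128K
 * ghost hit grows the MRU target arc_p by 512K (clamped to
 * arc_c - arc_p_min).  The MIN(mult, 10) cap keeps a lopsided ghost
 * ratio from swinging arc_p wildly on a single access.
 */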
2307  * Check if the cache has reached its limits and eviction is required
2311 arc_evict_needed(arc_buf_contents_t type)
2313     if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2318      * If zio data pages are being allocated out of a separate heap segment,
2319      * then enforce that the size of available vmem for this area remains
2320      * above about 1/32nd free.
2322     if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2323         vmem_size(zio_arena, VMEM_FREE) <
2324         (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2328     if (arc_reclaim_needed())
2331     return (arc_size > arc_c);
2335  * The buffer, supplied as the first argument, needs a data block.
2336  * So, if we are at cache max, determine which cache should be victimized.
2337  * We have the following cases:
2339  * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2340  * In this situation if we're out of space, but the resident size of the MFU is
2341  * under the limit, victimize the MFU cache to satisfy this insertion request.
2343  * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2344  * Here, we've used up all of the available space for the MRU, so we need to
2345  * evict from our own cache instead.  Evict from the set of resident MRU
2348  * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2349  * c minus p represents the MFU space in the cache, since p is the size of the
2350  * cache that is dedicated to the MRU.  In this situation there's still space on
2351  * the MFU side, so the MRU side needs to be victimized.
2353  * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2354  * MFU's resident set is consuming more space than it has been allotted.  In
2355  * this situation, we must victimize our own cache, the MFU, for this insertion.
2358 arc_get_data_buf(arc_buf_t *buf)
2360     arc_state_t *state = buf->b_hdr->b_state;
2361     uint64_t size = buf->b_hdr->b_size;
2362     arc_buf_contents_t type = buf->b_hdr->b_type;
2364     arc_adapt(size, state);
2367      * We have not yet reached cache maximum size,
2368      * just allocate a new buffer.
2370     if (!arc_evict_needed(type)) {
2371         if (type == ARC_BUFC_METADATA) {
2372             buf->b_data = zio_buf_alloc(size);
2373             arc_space_consume(size, ARC_SPACE_DATA);
2375             ASSERT(type == ARC_BUFC_DATA);
2376             buf->b_data = zio_data_buf_alloc(size);
2377             ARCSTAT_INCR(arcstat_data_size, size);
2378             atomic_add_64(&arc_size, size);
2384      * If we are prefetching from the mfu ghost list, this buffer
2385      * will end up on the mru list; so steal space from there.
2387     if (state == arc_mfu_ghost)
2388         state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2389     else if (state == arc_mru_ghost)
2392     if (state == arc_mru || state == arc_anon) {
2393         uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2394         state = (arc_mfu->arcs_lsize[type] >= size &&
2395             arc_p > mru_used) ? arc_mfu : arc_mru;
2398         uint64_t mfu_space = arc_c - arc_p;
2399         state = (arc_mru->arcs_lsize[type] >= size &&
2400             mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2402     if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2403         if (type == ARC_BUFC_METADATA) {
2404             buf->b_data = zio_buf_alloc(size);
2405             arc_space_consume(size, ARC_SPACE_DATA);
2407             ASSERT(type == ARC_BUFC_DATA);
2408             buf->b_data = zio_data_buf_alloc(size);
2409             ARCSTAT_INCR(arcstat_data_size, size);
2410             atomic_add_64(&arc_size, size);
2412         ARCSTAT_BUMP(arcstat_recycle_miss);
2414     ASSERT(buf->b_data != NULL);
2417      * Update the state size.  Note that ghost states have a
2418      * "ghost size" and so don't need to be updated.
2420     if (!GHOST_STATE(buf->b_hdr->b_state)) {
2421         arc_buf_hdr_t *hdr = buf->b_hdr;
2423         atomic_add_64(&hdr->b_state->arcs_size, size);
2424         if (list_link_active(&hdr->b_arc_node)) {
2425             ASSERT(refcount_is_zero(&hdr->b_refcnt));
2426             atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2429          * If we are growing the cache, and we are adding anonymous
2430          * data, and we have outgrown arc_p, update arc_p
2432         if (arc_size < arc_c && hdr->b_state == arc_anon &&
2433             arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2434             arc_p = MIN(arc_c, arc_p + size);
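/*
 * To restate the recycle policy above in terms of the four cases
 * described before arc_get_data_buf(): an MRU/anon insert steals from
 * the MFU only while arc_p still exceeds anon + mru (case 1 vs. case
 * 2), and an MFU insert steals from the MRU only while c - p still
 * exceeds the MFU's resident size (case 3 vs. case 4).  Either way the
 * chosen victim list must hold at least `size' evictable bytes of the
 * right type, or we fall back to a fresh allocation and bump
 * arcstat_recycle_miss.
 */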
2439  * This routine is called whenever a buffer is accessed.
2440  * NOTE: the hash lock is dropped in this function.
2443 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2447     ASSERT(MUTEX_HELD(hash_lock));
2449     if (buf->b_state == arc_anon) {
2451          * This buffer is not in the cache, and does not
2452          * appear in our "ghost" list.  Add the new buffer
2456         ASSERT(buf->b_arc_access == 0);
2457         buf->b_arc_access = ddi_get_lbolt();
2458         DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2459         arc_change_state(arc_mru, buf, hash_lock);
2461     } else if (buf->b_state == arc_mru) {
2462         now = ddi_get_lbolt();
2465          * If this buffer is here because of a prefetch, then either:
2466          * - clear the flag if this is a "referencing" read
2467          *   (any subsequent access will bump this into the MFU state).
2469          * - move the buffer to the head of the list if this is
2470          *   another prefetch (to make it less likely to be evicted).
2472         if ((buf->b_flags & ARC_PREFETCH) != 0) {
2473             if (refcount_count(&buf->b_refcnt) == 0) {
2474                 ASSERT(list_link_active(&buf->b_arc_node));
2476                 buf->b_flags &= ~ARC_PREFETCH;
2477                 ARCSTAT_BUMP(arcstat_mru_hits);
2479             buf->b_arc_access = now;
2484          * This buffer has been "accessed" only once so far,
2485          * but it is still in the cache.  Move it to the MFU
2488         if (now > buf->b_arc_access + ARC_MINTIME) {
2490              * More than 125ms have passed since we
2491              * instantiated this buffer.  Move it to the
2492              * most frequently used state.
2494             buf->b_arc_access = now;
2495             DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2496             arc_change_state(arc_mfu, buf, hash_lock);
2498         ARCSTAT_BUMP(arcstat_mru_hits);
2499     } else if (buf->b_state == arc_mru_ghost) {
2500         arc_state_t *new_state;
2502          * This buffer has been "accessed" recently, but
2503          * was evicted from the cache.  Move it to the
2507         if (buf->b_flags & ARC_PREFETCH) {
2508             new_state = arc_mru;
2509             if (refcount_count(&buf->b_refcnt) > 0)
2510                 buf->b_flags &= ~ARC_PREFETCH;
2511             DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2513             new_state = arc_mfu;
2514             DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2517         buf->b_arc_access = ddi_get_lbolt();
2518         arc_change_state(new_state, buf, hash_lock);
2520         ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2521     } else if (buf->b_state == arc_mfu) {
2523          * This buffer has been accessed more than once and is
2524          * still in the cache.  Keep it in the MFU state.
2526          * NOTE: an add_reference() that occurred when we did
2527          * the arc_read() will have kicked this off the list.
2528          * If it was a prefetch, we will explicitly move it to
2529          * the head of the list now.
2531         if ((buf->b_flags & ARC_PREFETCH) != 0) {
2532             ASSERT(refcount_count(&buf->b_refcnt) == 0);
2533             ASSERT(list_link_active(&buf->b_arc_node));
2535         ARCSTAT_BUMP(arcstat_mfu_hits);
2536         buf->b_arc_access = ddi_get_lbolt();
2537     } else if (buf->b_state == arc_mfu_ghost) {
2538         arc_state_t *new_state = arc_mfu;
2540          * This buffer has been accessed more than once but has
2541          * been evicted from the cache.  Move it back to the
2545         if (buf->b_flags & ARC_PREFETCH) {
2547              * This is a prefetch access...
2548              * move this block back to the MRU state.
2550             ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2551             new_state = arc_mru;
2554         buf->b_arc_access = ddi_get_lbolt();
2555         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2556         arc_change_state(new_state, buf, hash_lock);
2558         ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2559     } else if (buf->b_state == arc_l2c_only) {
2561          * This buffer is on the 2nd Level ARC.
2564         buf->b_arc_access = ddi_get_lbolt();
2565         DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2566         arc_change_state(arc_mfu, buf, hash_lock);
2568         ASSERT(!"invalid arc state");
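/*
 * In brief, the transitions implemented above are:
 *     anon      -> mru           (first access)
 *     mru       -> mfu           (re-access > ARC_MINTIME later)
 *     mru_ghost -> mru or mfu    (prefetch vs. demand)
 *     mfu_ghost -> mru or mfu    (prefetch vs. demand)
 *     l2c_only  -> mfu           (header kept alive only by the L2ARC)
 * The ARC_MINTIME window (the 125ms noted above) keeps a burst of
 * back-to-back reads from counting as more than one "use".
 */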
2572 /* a generic arc_done_func_t which you can use */
2575 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2577     if (zio == NULL || zio->io_error == 0)
2578         bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2579     VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2582 /* a generic arc_done_func_t */
2584 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2586     arc_buf_t **bufp = arg;
2587     if (zio && zio->io_error) {
2588         VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2592         ASSERT(buf->b_data);
2597 arc_read_done(zio_t *zio)
2599     arc_buf_hdr_t *hdr, *found;
2601     arc_buf_t *abuf;    /* buffer we're assigning to callback */
2602     kmutex_t *hash_lock;
2603     arc_callback_t *callback_list, *acb;
2604     int freeable = FALSE;
2606     buf = zio->io_private;
2610      * The hdr was inserted into hash-table and removed from lists
2611      * prior to starting I/O.  We should find this header, since
2612      * it's in the hash table, and it should be legit since it's
2613      * not possible to evict it during the I/O.  The only possible
2614      * reason for it not to be found is if we were freed during the
2617     found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2620     ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2621         (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2622         (found == hdr && HDR_L2_READING(hdr)));
2624     hdr->b_flags &= ~ARC_L2_EVICTED;
2625     if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2626         hdr->b_flags &= ~ARC_L2CACHE;
2628     /* byteswap if necessary */
2629     callback_list = hdr->b_acb;
2630     ASSERT(callback_list != NULL);
2631     if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2632         arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2633             byteswap_uint64_array :
2634             dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2635         func(buf->b_data, hdr->b_size);
2638     arc_cksum_compute(buf, B_FALSE);
2640     if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2642          * Only call arc_access on anonymous buffers.  This is because
2643          * if we've issued an I/O for an evicted buffer, we've already
2644          * called arc_access (to prevent any simultaneous readers from
2645          * getting confused).
2647         arc_access(hdr, hash_lock);
2650     /* create copies of the data buffer for the callers */
2652     for (acb = callback_list; acb; acb = acb->acb_next) {
2653         if (acb->acb_done) {
2655             abuf = arc_buf_clone(buf);
2656             acb->acb_buf = abuf;
2661     hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2662     ASSERT(!HDR_BUF_AVAILABLE(hdr));
2664         ASSERT(buf->b_efunc == NULL);
2665         ASSERT(hdr->b_datacnt == 1);
2666         hdr->b_flags |= ARC_BUF_AVAILABLE;
2669     ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2671     if (zio->io_error != 0) {
2672         hdr->b_flags |= ARC_IO_ERROR;
2673         if (hdr->b_state != arc_anon)
2674             arc_change_state(arc_anon, hdr, hash_lock);
2675         if (HDR_IN_HASH_TABLE(hdr))
2676             buf_hash_remove(hdr);
2677         freeable = refcount_is_zero(&hdr->b_refcnt);
2681      * Broadcast before we drop the hash_lock to avoid the possibility
2682      * that the hdr (and hence the cv) might be freed before we get to
2683      * the cv_broadcast().
2685     cv_broadcast(&hdr->b_cv);
2688         mutex_exit(hash_lock);
2691          * This block was freed while we waited for the read to
2692          * complete.  It has been removed from the hash table and
2693          * moved to the anonymous state (so that it won't show up
2696         ASSERT3P(hdr->b_state, ==, arc_anon);
2697         freeable = refcount_is_zero(&hdr->b_refcnt);
2700     /* execute each callback and free its structure */
2701     while ((acb = callback_list) != NULL) {
2703             acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2705         if (acb->acb_zio_dummy != NULL) {
2706             acb->acb_zio_dummy->io_error = zio->io_error;
2707             zio_nowait(acb->acb_zio_dummy);
2710         callback_list = acb->acb_next;
2711         kmem_free(acb, sizeof (arc_callback_t));
2715         arc_hdr_destroy(hdr);
2719 * "Read" the block block at the specified DVA (in bp) via the
2720 * cache. If the block is found in the cache, invoke the provided
2721 * callback immediately and return. Note that the `zio' parameter
2722 * in the callback will be NULL in this case, since no IO was
2723 * required. If the block is not in the cache pass the read request
2724 * on to the spa with a substitute callback function, so that the
2725 * requested block will be added to the cache.
2727 * If a read request arrives for a block that has a read in-progress,
2728 * either wait for the in-progress read to complete (and return the
2729 * results); or, if this is a read with a "done" func, add a record
2730 * to the read to invoke the "done" func when the read completes,
2731 * and return; or just return.
2733 * arc_read_done() will invoke all the requested "done" functions
2734 * for readers of this block.
2736 * Normal callers should use arc_read and pass the arc buffer and offset
2737 * for the bp. But if you know you don't need locking, you can use
2741 arc_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
, arc_buf_t
*pbuf
,
2742 arc_done_func_t
*done
, void *private, int priority
, int zio_flags
,
2743 uint32_t *arc_flags
, const zbookmark_t
*zb
)
2749 * XXX This happens from traverse callback funcs, for
2750 * the objset_phys_t block.
2752 return (arc_read_nolock(pio
, spa
, bp
, done
, private, priority
,
2753 zio_flags
, arc_flags
, zb
));
2756 ASSERT(!refcount_is_zero(&pbuf
->b_hdr
->b_refcnt
));
2757 ASSERT3U((char *)bp
- (char *)pbuf
->b_data
, <, pbuf
->b_hdr
->b_size
);
2758 rw_enter(&pbuf
->b_data_lock
, RW_READER
);
2760 err
= arc_read_nolock(pio
, spa
, bp
, done
, private, priority
,
2761 zio_flags
, arc_flags
, zb
);
2762 rw_exit(&pbuf
->b_data_lock
);
2768 arc_read_nolock(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
,
2769 arc_done_func_t
*done
, void *private, int priority
, int zio_flags
,
2770 uint32_t *arc_flags
, const zbookmark_t
*zb
)
2773 arc_buf_t
*buf
= NULL
;
2774 kmutex_t
*hash_lock
;
2776 uint64_t guid
= spa_guid(spa
);
2779 hdr
= buf_hash_find(guid
, BP_IDENTITY(bp
), BP_PHYSICAL_BIRTH(bp
),
2781 if (hdr
&& hdr
->b_datacnt
> 0) {
2783 *arc_flags
|= ARC_CACHED
;
2785 if (HDR_IO_IN_PROGRESS(hdr
)) {
2787 if (*arc_flags
& ARC_WAIT
) {
2788 cv_wait(&hdr
->b_cv
, hash_lock
);
2789 mutex_exit(hash_lock
);
2792 ASSERT(*arc_flags
& ARC_NOWAIT
);
2795 arc_callback_t
*acb
= NULL
;
2797 acb
= kmem_zalloc(sizeof (arc_callback_t
),
2799 acb
->acb_done
= done
;
2800 acb
->acb_private
= private;
2802 acb
->acb_zio_dummy
= zio_null(pio
,
2803 spa
, NULL
, NULL
, NULL
, zio_flags
);
2805 ASSERT(acb
->acb_done
!= NULL
);
2806 acb
->acb_next
= hdr
->b_acb
;
2808 add_reference(hdr
, hash_lock
, private);
2809 mutex_exit(hash_lock
);
2812 mutex_exit(hash_lock
);
2816 ASSERT(hdr
->b_state
== arc_mru
|| hdr
->b_state
== arc_mfu
);
2819 add_reference(hdr
, hash_lock
, private);
2821 * If this block is already in use, create a new
2822 * copy of the data so that we will be guaranteed
2823 * that arc_release() will always succeed.
2827 ASSERT(buf
->b_data
);
2828 if (HDR_BUF_AVAILABLE(hdr
)) {
2829 ASSERT(buf
->b_efunc
== NULL
);
2830 hdr
->b_flags
&= ~ARC_BUF_AVAILABLE
;
2832 buf
= arc_buf_clone(buf
);
2835 } else if (*arc_flags
& ARC_PREFETCH
&&
2836 refcount_count(&hdr
->b_refcnt
) == 0) {
2837 hdr
->b_flags
|= ARC_PREFETCH
;
2839 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
2840 arc_access(hdr
, hash_lock
);
2841 if (*arc_flags
& ARC_L2CACHE
)
2842 hdr
->b_flags
|= ARC_L2CACHE
;
2843 mutex_exit(hash_lock
);
2844 ARCSTAT_BUMP(arcstat_hits
);
2845 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
2846 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
2847 data
, metadata
, hits
);
2850 done(NULL
, buf
, private);
2852 uint64_t size
= BP_GET_LSIZE(bp
);
2853 arc_callback_t
*acb
;
2856 boolean_t devw
= B_FALSE
;
2859 /* this block is not in the cache */
2860 arc_buf_hdr_t
*exists
;
2861 arc_buf_contents_t type
= BP_GET_BUFC_TYPE(bp
);
2862 buf
= arc_buf_alloc(spa
, size
, private, type
);
2864 hdr
->b_dva
= *BP_IDENTITY(bp
);
2865 hdr
->b_birth
= BP_PHYSICAL_BIRTH(bp
);
2866 hdr
->b_cksum0
= bp
->blk_cksum
.zc_word
[0];
2867 exists
= buf_hash_insert(hdr
, &hash_lock
);
2869 /* somebody beat us to the hash insert */
2870 mutex_exit(hash_lock
);
2871 buf_discard_identity(hdr
);
2872 (void) arc_buf_remove_ref(buf
, private);
2873 goto top
; /* restart the IO request */
2875 /* if this is a prefetch, we don't have a reference */
2876 if (*arc_flags
& ARC_PREFETCH
) {
2877 (void) remove_reference(hdr
, hash_lock
,
2879 hdr
->b_flags
|= ARC_PREFETCH
;
2881 if (*arc_flags
& ARC_L2CACHE
)
2882 hdr
->b_flags
|= ARC_L2CACHE
;
2883 if (BP_GET_LEVEL(bp
) > 0)
2884 hdr
->b_flags
|= ARC_INDIRECT
;
2886 /* this block is in the ghost cache */
2887 ASSERT(GHOST_STATE(hdr
->b_state
));
2888 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
2889 ASSERT3U(refcount_count(&hdr
->b_refcnt
), ==, 0);
2890 ASSERT(hdr
->b_buf
== NULL
);
2892 /* if this is a prefetch, we don't have a reference */
2893 if (*arc_flags
& ARC_PREFETCH
)
2894 hdr
->b_flags
|= ARC_PREFETCH
;
2896 add_reference(hdr
, hash_lock
, private);
2897 if (*arc_flags
& ARC_L2CACHE
)
2898 hdr
->b_flags
|= ARC_L2CACHE
;
2899 buf
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
2902 buf
->b_efunc
= NULL
;
2903 buf
->b_private
= NULL
;
2906 ASSERT(hdr
->b_datacnt
== 0);
2908 arc_get_data_buf(buf
);
2909 arc_access(hdr
, hash_lock
);
2912 ASSERT(!GHOST_STATE(hdr
->b_state
));
2914 acb
= kmem_zalloc(sizeof (arc_callback_t
), KM_PUSHPAGE
);
2915 acb
->acb_done
= done
;
2916 acb
->acb_private
= private;
2918 ASSERT(hdr
->b_acb
== NULL
);
2920 hdr
->b_flags
|= ARC_IO_IN_PROGRESS
;
2922 if (HDR_L2CACHE(hdr
) && hdr
->b_l2hdr
!= NULL
&&
2923 (vd
= hdr
->b_l2hdr
->b_dev
->l2ad_vdev
) != NULL
) {
2924 devw
= hdr
->b_l2hdr
->b_dev
->l2ad_writing
;
2925 addr
= hdr
->b_l2hdr
->b_daddr
;
2927 * Lock out device removal.
2929 if (vdev_is_dead(vd
) ||
2930 !spa_config_tryenter(spa
, SCL_L2ARC
, vd
, RW_READER
))
2934 mutex_exit(hash_lock
);
2936 ASSERT3U(hdr
->b_size
, ==, size
);
2937 DTRACE_PROBE4(arc__miss
, arc_buf_hdr_t
*, hdr
, blkptr_t
*, bp
,
2938 uint64_t, size
, zbookmark_t
*, zb
);
2939 ARCSTAT_BUMP(arcstat_misses
);
2940 ARCSTAT_CONDSTAT(!(hdr
->b_flags
& ARC_PREFETCH
),
2941 demand
, prefetch
, hdr
->b_type
!= ARC_BUFC_METADATA
,
2942 data
, metadata
, misses
);
2944 if (vd
!= NULL
&& l2arc_ndev
!= 0 && !(l2arc_norw
&& devw
)) {
2946 * Read from the L2ARC if the following are true:
2947 * 1. The L2ARC vdev was previously cached.
2948 * 2. This buffer still has L2ARC metadata.
2949 * 3. This buffer isn't currently writing to the L2ARC.
2950 * 4. The L2ARC entry wasn't evicted, which may
2951 * also have invalidated the vdev.
2952 * 5. This isn't prefetch and l2arc_noprefetch is set.
2954 if (hdr
->b_l2hdr
!= NULL
&&
2955 !HDR_L2_WRITING(hdr
) && !HDR_L2_EVICTED(hdr
) &&
2956 !(l2arc_noprefetch
&& HDR_PREFETCH(hdr
))) {
2957 l2arc_read_callback_t
*cb
;
2959 DTRACE_PROBE1(l2arc__hit
, arc_buf_hdr_t
*, hdr
);
2960 ARCSTAT_BUMP(arcstat_l2_hits
);
2962 cb
= kmem_zalloc(sizeof (l2arc_read_callback_t
),
2964 cb
->l2rcb_buf
= buf
;
2965 cb
->l2rcb_spa
= spa
;
2968 cb
->l2rcb_flags
= zio_flags
;
2971 * l2arc read. The SCL_L2ARC lock will be
2972 * released by l2arc_read_done().
2974 rzio
= zio_read_phys(pio
, vd
, addr
, size
,
2975 buf
->b_data
, ZIO_CHECKSUM_OFF
,
2976 l2arc_read_done
, cb
, priority
, zio_flags
|
2977 ZIO_FLAG_DONT_CACHE
| ZIO_FLAG_CANFAIL
|
2978 ZIO_FLAG_DONT_PROPAGATE
|
2979 ZIO_FLAG_DONT_RETRY
, B_FALSE
);
2980 DTRACE_PROBE2(l2arc__read
, vdev_t
*, vd
,
2982 ARCSTAT_INCR(arcstat_l2_read_bytes
, size
);
2984 if (*arc_flags
& ARC_NOWAIT
) {
2989 ASSERT(*arc_flags
& ARC_WAIT
);
2990 if (zio_wait(rzio
) == 0)
2993 /* l2arc read error; goto zio_read() */
2995 DTRACE_PROBE1(l2arc__miss
,
2996 arc_buf_hdr_t
*, hdr
);
2997 ARCSTAT_BUMP(arcstat_l2_misses
);
2998 if (HDR_L2_WRITING(hdr
))
2999 ARCSTAT_BUMP(arcstat_l2_rw_clash
);
3000 spa_config_exit(spa
, SCL_L2ARC
, vd
);
3004 spa_config_exit(spa
, SCL_L2ARC
, vd
);
3005 if (l2arc_ndev
!= 0) {
3006 DTRACE_PROBE1(l2arc__miss
,
3007 arc_buf_hdr_t
*, hdr
);
3008 ARCSTAT_BUMP(arcstat_l2_misses
);
3012 rzio
= zio_read(pio
, spa
, bp
, buf
->b_data
, size
,
3013 arc_read_done
, buf
, priority
, zio_flags
, zb
);
3015 if (*arc_flags
& ARC_WAIT
)
3016 return (zio_wait(rzio
));
3018 ASSERT(*arc_flags
& ARC_NOWAIT
);
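/*
 * A minimal caller sketch for the function above (hypothetical
 * variable names, error handling elided), showing the two completion
 * styles it accepts:
 *
 *     uint32_t flags = ARC_WAIT;     (block until the data is cached)
 *     zbookmark_t zb = { 0 };
 *     arc_buf_t *abuf = NULL;
 *     (void) arc_read_nolock(NULL, spa, bp, arc_getbuf_func, &abuf,
 *         ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &flags, &zb);
 *
 * With ARC_NOWAIT instead, arc_read_nolock() returns immediately and
 * the done func fires from arc_read_done() when the zio completes.
 */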
3025 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3027     ASSERT(buf->b_hdr != NULL);
3028     ASSERT(buf->b_hdr->b_state != arc_anon);
3029     ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3030     ASSERT(buf->b_efunc == NULL);
3031     ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3033     buf->b_efunc = func;
3034     buf->b_private = private;
3038  * This is used by the DMU to let the ARC know that a buffer is
3039  * being evicted, so the ARC should clean up.  If this arc buf
3040  * is not yet in the evicted state, it will be put there.
3043 arc_buf_evict(arc_buf_t *buf)
3046     kmutex_t *hash_lock;
3049     mutex_enter(&buf->b_evict_lock);
3053          * We are in arc_do_user_evicts().
3055         ASSERT(buf->b_data == NULL);
3056         mutex_exit(&buf->b_evict_lock);
3058     } else if (buf->b_data == NULL) {
3059         arc_buf_t copy = *buf; /* structure assignment */
3061          * We are on the eviction list; process this buffer now
3062          * but let arc_do_user_evicts() do the reaping.
3064         buf->b_efunc = NULL;
3065         mutex_exit(&buf->b_evict_lock);
3066         VERIFY(copy.b_efunc(&copy) == 0);
3069     hash_lock = HDR_LOCK(hdr);
3070     mutex_enter(hash_lock);
3072     ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3074     ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3075     ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3078      * Pull this buffer off of the hdr
3081     while (*bufp != buf)
3082         bufp = &(*bufp)->b_next;
3083     *bufp = buf->b_next;
3085     ASSERT(buf->b_data != NULL);
3086     arc_buf_destroy(buf, FALSE, FALSE);
3088     if (hdr->b_datacnt == 0) {
3089         arc_state_t *old_state = hdr->b_state;
3090         arc_state_t *evicted_state;
3092         ASSERT(hdr->b_buf == NULL);
3093         ASSERT(refcount_is_zero(&hdr->b_refcnt));
3096             (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3098         mutex_enter(&old_state->arcs_mtx);
3099         mutex_enter(&evicted_state->arcs_mtx);
3101         arc_change_state(evicted_state, hdr, hash_lock);
3102         ASSERT(HDR_IN_HASH_TABLE(hdr));
3103         hdr->b_flags |= ARC_IN_HASH_TABLE;
3104         hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3106         mutex_exit(&evicted_state->arcs_mtx);
3107         mutex_exit(&old_state->arcs_mtx);
3109     mutex_exit(hash_lock);
3110     mutex_exit(&buf->b_evict_lock);
3112     VERIFY(buf->b_efunc(buf) == 0);
3113     buf->b_efunc = NULL;
3114     buf->b_private = NULL;
3117     kmem_cache_free(buf_cache, buf);
3122  * Release this buffer from the cache.  This must be done
3123  * after a read and prior to modifying the buffer contents.
3124  * If the buffer has more than one reference, we must make
3125  * a new hdr for the buffer.
3128 arc_release(arc_buf_t *buf, void *tag)
3131     kmutex_t *hash_lock = NULL;
3132     l2arc_buf_hdr_t *l2hdr;
3133     uint64_t buf_size = 0;
3136      * It would be nice to assert that if it's DMU metadata (level >
3137      * 0 || it's the dnode file), then it must be syncing context.
3138      * But we don't know that information at this level.
3141     mutex_enter(&buf->b_evict_lock);
3144     /* this buffer is not on any list */
3145     ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3147     if (hdr->b_state == arc_anon) {
3148         /* this buffer is already released */
3149         ASSERT(buf->b_efunc == NULL);
3151         hash_lock = HDR_LOCK(hdr);
3152         mutex_enter(hash_lock);
3154         ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3157     l2hdr = hdr->b_l2hdr;
3159         mutex_enter(&l2arc_buflist_mtx);
3160         hdr->b_l2hdr = NULL;
3161         buf_size = hdr->b_size;
3165      * Do we have more than one buf?
3167     if (hdr->b_datacnt > 1) {
3168         arc_buf_hdr_t *nhdr;
3170         uint64_t blksz = hdr->b_size;
3171         uint64_t spa = hdr->b_spa;
3172         arc_buf_contents_t type = hdr->b_type;
3173         uint32_t flags = hdr->b_flags;
3175         ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3177          * Pull the data off of this hdr and attach it to
3178          * a new anonymous hdr.
3180         (void) remove_reference(hdr, hash_lock, tag);
3182         while (*bufp != buf)
3183             bufp = &(*bufp)->b_next;
3184         *bufp = buf->b_next;
3187         ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3188         atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3189         if (refcount_is_zero(&hdr->b_refcnt)) {
3190             uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3191             ASSERT3U(*size, >=, hdr->b_size);
3192             atomic_add_64(size, -hdr->b_size);
3194         hdr->b_datacnt -= 1;
3195         arc_cksum_verify(buf);
3197         mutex_exit(hash_lock);
3199         nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3200         nhdr->b_size = blksz;
3202         nhdr->b_type = type;
3204         nhdr->b_state = arc_anon;
3205         nhdr->b_arc_access = 0;
3206         nhdr->b_flags = flags & ARC_L2_WRITING;
3207         nhdr->b_l2hdr = NULL;
3208         nhdr->b_datacnt = 1;
3209         nhdr->b_freeze_cksum = NULL;
3210         (void) refcount_add(&nhdr->b_refcnt, tag);
3212         mutex_exit(&buf->b_evict_lock);
3213         atomic_add_64(&arc_anon->arcs_size, blksz);
3215         mutex_exit(&buf->b_evict_lock);
3216         ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3217         ASSERT(!list_link_active(&hdr->b_arc_node));
3218         ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3219         if (hdr->b_state != arc_anon)
3220             arc_change_state(arc_anon, hdr, hash_lock);
3221         hdr->b_arc_access = 0;
3223             mutex_exit(hash_lock);
3225         buf_discard_identity(hdr);
3228     buf->b_efunc = NULL;
3229     buf->b_private = NULL;
3232         list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3233         kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3234         ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3235         mutex_exit(&l2arc_buflist_mtx);
3240  * Release this buffer.  If it does not match the provided BP, fill it
3241  * with that block's contents.
3245 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
3248     arc_release(buf, tag);
3253 arc_released(arc_buf_t *buf)
3257     mutex_enter(&buf->b_evict_lock);
3258     released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3259     mutex_exit(&buf->b_evict_lock);
3264 arc_has_callback(arc_buf_t *buf)
3268     mutex_enter(&buf->b_evict_lock);
3269     callback = (buf->b_efunc != NULL);
3270     mutex_exit(&buf->b_evict_lock);
3276 arc_referenced(arc_buf_t *buf)
3280     mutex_enter(&buf->b_evict_lock);
3281     referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3282     mutex_exit(&buf->b_evict_lock);
3283     return (referenced);
3288 arc_write_ready(zio_t *zio)
3290     arc_write_callback_t *callback = zio->io_private;
3291     arc_buf_t *buf = callback->awcb_buf;
3292     arc_buf_hdr_t *hdr = buf->b_hdr;
3294     ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3295     callback->awcb_ready(zio, buf, callback->awcb_private);
3298      * If the IO is already in progress, then this is a re-write
3299      * attempt, so we need to thaw and re-compute the cksum.
3300      * It is the responsibility of the callback to handle the
3301      * accounting for any re-write attempt.
3303     if (HDR_IO_IN_PROGRESS(hdr)) {
3304         mutex_enter(&hdr->b_freeze_lock);
3305         if (hdr->b_freeze_cksum != NULL) {
3306             kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3307             hdr->b_freeze_cksum = NULL;
3309         mutex_exit(&hdr->b_freeze_lock);
3311     arc_cksum_compute(buf, B_FALSE);
3312     hdr->b_flags |= ARC_IO_IN_PROGRESS;
3316 arc_write_done(zio_t *zio)
3318     arc_write_callback_t *callback = zio->io_private;
3319     arc_buf_t *buf = callback->awcb_buf;
3320     arc_buf_hdr_t *hdr = buf->b_hdr;
3322     ASSERT(hdr->b_acb == NULL);
3324     if (zio->io_error == 0) {
3325         hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3326         hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3327         hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3329         ASSERT(BUF_EMPTY(hdr));
3333      * If the block to be written was all-zero, we may have
3334      * compressed it away.  In this case no write was performed
3335      * so there will be no dva/birth/checksum.  The buffer must
3336      * therefore remain anonymous (and uncached).
3338     if (!BUF_EMPTY(hdr)) {
3339         arc_buf_hdr_t *exists;
3340         kmutex_t *hash_lock;
3342         ASSERT(zio->io_error == 0);
3344         arc_cksum_verify(buf);
3346         exists = buf_hash_insert(hdr, &hash_lock);
3349              * This can only happen if we overwrite for
3350              * sync-to-convergence, because we remove
3351              * buffers from the hash table when we arc_free().
3353             if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3354                 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3355                     panic("bad overwrite, hdr=%p exists=%p",
3356                         (void *)hdr, (void *)exists);
3357                 ASSERT(refcount_is_zero(&exists->b_refcnt));
3358                 arc_change_state(arc_anon, exists, hash_lock);
3359                 mutex_exit(hash_lock);
3360                 arc_hdr_destroy(exists);
3361                 exists = buf_hash_insert(hdr, &hash_lock);
3362                 ASSERT3P(exists, ==, NULL);
3365                 ASSERT(hdr->b_datacnt == 1);
3366                 ASSERT(hdr->b_state == arc_anon);
3367                 ASSERT(BP_GET_DEDUP(zio->io_bp));
3368                 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3371         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3372         /* if it's not anon, we are doing a scrub */
3373         if (!exists && hdr->b_state == arc_anon)
3374             arc_access(hdr, hash_lock);
3375         mutex_exit(hash_lock);
3377         hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3380     ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3381     callback->awcb_done(zio, buf, callback->awcb_private);
3383     kmem_free(callback, sizeof (arc_write_callback_t));
3387 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3388     blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3389     arc_done_func_t *ready, arc_done_func_t *done, void *private,
3390     int priority, int zio_flags, const zbookmark_t *zb)
3392     arc_buf_hdr_t *hdr = buf->b_hdr;
3393     arc_write_callback_t *callback;
3396     ASSERT(ready != NULL);
3397     ASSERT(done != NULL);
3398     ASSERT(!HDR_IO_ERROR(hdr));
3399     ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3400     ASSERT(hdr->b_acb == NULL);
3402         hdr->b_flags |= ARC_L2CACHE;
3403     callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3404     callback->awcb_ready = ready;
3405     callback->awcb_done = done;
3406     callback->awcb_private = private;
3407     callback->awcb_buf = buf;
3409     zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3410         arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3416 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3419     uint64_t available_memory = ptob(freemem);
3420     static uint64_t page_load = 0;
3421     static uint64_t last_txg = 0;
3425         MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3427     if (available_memory >= zfs_write_limit_max)
3430     if (txg > last_txg) {
3435      * If we are in pageout, we know that memory is already tight,
3436      * the arc is already going to be evicting, so we just want to
3437      * continue to let page writes occur as quickly as possible.
3439     if (curproc == proc_pageout) {
3440         if (page_load > MAX(ptob(minfree), available_memory) / 4)
3442         /* Note: reserve is inflated, so we deflate */
3443         page_load += reserve / 8;
3445     } else if (page_load > 0 && arc_reclaim_needed()) {
3446         /* memory is low, delay before restarting */
3447         ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3452     if (arc_size > arc_c_min) {
3453         uint64_t evictable_memory =
3454             arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3455             arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3456             arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3457             arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3458         available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3461     if (inflight_data > available_memory / 4) {
3462         ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3470 arc_tempreserve_clear(uint64_t reserve)
3472     atomic_add_64(&arc_tempreserve, -reserve);
3473     ASSERT((int64_t)arc_tempreserve >= 0);
3477 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3484      * Once in a while, fail for no reason.  Everything should cope.
3486     if (spa_get_random(10000) == 0) {
3487         dprintf("forcing random failure\n");
3491     if (reserve > arc_c/4 && !arc_no_grow)
3492         arc_c = MIN(arc_c_max, reserve * 4);
3493     if (reserve > arc_c)
3497      * Don't count loaned bufs as in flight dirty data to prevent long
3498      * network delays from blocking transactions that are ready to be
3499      * assigned to a txg.
3501     anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3504      * Writes will, almost always, require additional memory allocations
3505      * in order to compress/encrypt/etc the data.  We therefore need to
3506      * make sure that there is sufficient available memory for this.
3508     if ((error = arc_memory_throttle(reserve, anon_size, txg)))
3512      * Throttle writes when the amount of dirty data in the cache
3513      * gets too large.  We try to keep the cache less than half full
3514      * of dirty blocks so that our sync times don't grow too large.
3515      * Note: if two requests come in concurrently, we might let them
3516      * both succeed, when one of them should fail.  Not a huge deal.
3519     if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3520         anon_size > arc_c / 4) {
3521         dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3522             "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3523             arc_tempreserve>>10,
3524             arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3525             arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3526             reserve>>10, arc_c>>10);
3529     atomic_add_64(&arc_tempreserve, reserve);
3536     mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3537     cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3539     /* Convert seconds to clock ticks */
3540     arc_min_prefetch_lifespan = 1 * hz;
3542     /* Start out with 1/8 of all memory */
3543     arc_c = physmem * PAGESIZE / 8;
3547      * On architectures where the physical memory can be larger
3548      * than the addressable space (intel in 32-bit mode), we may
3549      * need to limit the cache to 1/8 of VM size.
3551     arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3553      * Register a shrinker to support synchronous (direct) memory
3554      * reclaim from the arc.  This is done to prevent kswapd from
3555      * swapping out pages when it is preferable to shrink the arc.
3557     spl_register_shrinker(&arc_shrinker);
3560     /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3561     arc_c_min = MAX(arc_c / 4, 64<<20);
3562     /* set max to 1/2 of all memory, or all but 4GB, whichever is more */
3563     if (arc_c * 8 >= ((uint64_t)4<<30))
3564         arc_c_max = (arc_c * 8) - ((uint64_t)4<<30);
3566         arc_c_max = arc_c_min;
3567     arc_c_max = MAX(arc_c * 4, arc_c_max);
3570      * Allow the tunables to override our calculations if they are
3571      * reasonable (ie. over 64MB)
3573     if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3574         arc_c_max = zfs_arc_max;
3575     if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3576         arc_c_min = zfs_arc_min;
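/*
 * Example sizing on a 16G machine with no tunables set: arc_c starts
 * at 2G (1/8 of memory), arc_c_min = MAX(2G / 4, 64M) = 512M, and
 * since arc_c * 8 = 16G >= 4G, arc_c_max = 16G - 4G = 12G (the MAX
 * against arc_c * 4 = 8G leaves that unchanged).
 */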
3579     arc_p = (arc_c >> 1);
3581     /* limit meta-data to 1/4 of the arc capacity */
3582     arc_meta_limit = arc_c_max / 4;
3585     /* Allow the tunable to override if it is reasonable */
3586     if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3587         arc_meta_limit = zfs_arc_meta_limit;
3589     if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3590         arc_c_min = arc_meta_limit / 2;
3592     if (zfs_arc_grow_retry > 0)
3593         arc_grow_retry = zfs_arc_grow_retry;
3595     if (zfs_arc_shrink_shift > 0)
3596         arc_shrink_shift = zfs_arc_shrink_shift;
3598     if (zfs_arc_p_min_shift > 0)
3599         arc_p_min_shift = zfs_arc_p_min_shift;
3601     if (zfs_arc_reduce_dnlc_percent > 0)
3602         arc_reduce_dnlc_percent = zfs_arc_reduce_dnlc_percent;
3604     /* if kmem_flags are set, lets try to use less memory */
3605     if (kmem_debugging())
3607     if (arc_c < arc_c_min)
3610     arc_anon = &ARC_anon;
3612     arc_mru_ghost = &ARC_mru_ghost;
3614     arc_mfu_ghost = &ARC_mfu_ghost;
3615     arc_l2c_only = &ARC_l2c_only;
3618     mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3619     mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3620     mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3621     mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3622     mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3623     mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3625     list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3626         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3627     list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3628         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3629     list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3630         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3631     list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3632         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3633     list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3634         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3635     list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3636         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3637     list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3638         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3639     list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3640         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3641     list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3642         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3643     list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3644         sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3648     arc_thread_exit = 0;
3649     arc_eviction_list = NULL;
3650     mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3651     bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3653     arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3654         sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3656     if (arc_ksp != NULL) {
3657         arc_ksp->ks_data = &arc_stats;
3658         kstat_install(arc_ksp);
3661     (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3662         TS_RUN, minclsyspri);
3667     if (zfs_write_limit_max == 0)
3668         zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3670         zfs_write_limit_shift = 0;
3671     mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3677     mutex_enter(&arc_reclaim_thr_lock);
3679     spl_unregister_shrinker(&arc_shrinker);
3680 #endif /* _KERNEL */
3682     arc_thread_exit = 1;
3683     while (arc_thread_exit != 0)
3684         cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3685     mutex_exit(&arc_reclaim_thr_lock);
3691     if (arc_ksp != NULL) {
3692         kstat_delete(arc_ksp);
3696     mutex_destroy(&arc_eviction_mtx);
3697     mutex_destroy(&arc_reclaim_thr_lock);
3698     cv_destroy(&arc_reclaim_thr_cv);
3700     list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3701     list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3702     list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3703     list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3704     list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3705     list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3706     list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3707     list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3709     mutex_destroy(&arc_anon->arcs_mtx);
3710     mutex_destroy(&arc_mru->arcs_mtx);
3711     mutex_destroy(&arc_mru_ghost->arcs_mtx);
3712     mutex_destroy(&arc_mfu->arcs_mtx);
3713     mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3714     mutex_destroy(&arc_l2c_only->arcs_mtx);
3716     mutex_destroy(&zfs_write_limit_lock);
3720     ASSERT(arc_loaned_bytes == 0);
3726 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3727 * It uses dedicated storage devices to hold cached data, which are populated
3728 * using large infrequent writes. The main role of this cache is to boost
3729 * the performance of random read workloads. The intended L2ARC devices
3730 * include short-stroked disks, solid state disks, and other media with
3731 * substantially faster read latency than disk.
3733  *                 +-----------------------+
3735  *                 +-----------------------+
3738  *       l2arc_feed_thread()    arc_read()
3742  *               +---------------+         |
3744  *               +---------------+         |
3749  *                   +-------+    +-------+
3751  *                   | cache |    | cache |
3752  *                   +-------+    +-------+
3753  *                 +=========+     .-----.
3754  *                 :  L2ARC  :    |-_____-|
3755  *                 : devices :    | Disks |
3756  *                 +=========+    `-_____-'
3758 * Read requests are satisfied from the following sources, in order:
3761 * 2) vdev cache of L2ARC devices
3763 * 4) vdev cache of disks
3766 * Some L2ARC device types exhibit extremely slow write performance.
3767  * To accommodate this, there are some significant differences between
3768 * the L2ARC and traditional cache design:
3770 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3771 * the ARC behave as usual, freeing buffers and placing headers on ghost
3772 * lists. The ARC does not send buffers to the L2ARC during eviction as
3773 * this would add inflated write latencies for all ARC memory pressure.
3775 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3776 * It does this by periodically scanning buffers from the eviction-end of
3777 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3778 * not already there. It scans until a headroom of buffers is satisfied,
3779 * which itself is a buffer for ARC eviction. The thread that does this is
3780 * l2arc_feed_thread(), illustrated below; example sizes are included to
3781 * provide a better sense of ratio than this diagram:
3784  *              +---------------------+----------+
3785  *   ARC_mfu    |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
3786  *              +---------------------+----------+    |   o L2ARC eligible
3787  *   ARC_mru    |:#:::::::::::::::::::|#o#ooo####|-->|        : ARC buffer
3788  *              +---------------------+----------+    |
3789  *              15.9 Gbytes           ^ 32 Mbytes     |
3791  *                             l2arc_feed_thread()
3793  *                 l2arc write hand <--[oooo]--'
3797  *              +==============================+
3798  *   L2ARC dev  |####|#|###|###|    |####| ... |
3799  *              +==============================+
3802 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3803 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3804 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3805 * safe to say that this is an uncommon case, since buffers at the end of
3806 * the ARC lists have moved there due to inactivity.
3808 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3809 * then the L2ARC simply misses copying some buffers. This serves as a
3810 * pressure valve to prevent heavy read workloads from both stalling the ARC
3811 * with waits and clogging the L2ARC with writes. This also helps prevent
3812 * the potential for the L2ARC to churn if it attempts to cache content too
3813 * quickly, such as during backups of the entire pool.
3815 * 5. After system boot and before the ARC has filled main memory, there are
3816 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3817 * lists can remain mostly static. Instead of searching from tail of these
3818 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3819 * for eligible buffers, greatly increasing its chance of finding them.
3821 * The L2ARC device write speed is also boosted during this time so that
3822 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3823 * there are no L2ARC reads, and no fear of degrading read performance
3824 * through increased writes.
3826 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3827 * the vdev queue can aggregate them into larger and fewer writes. Each
3828 * device is written to in a rotor fashion, sweeping writes through
3829 * available space then repeating.
3831 * 7. The L2ARC does not store dirty content. It never needs to flush
3832 * write buffers back to disk based storage.
3834 * 8. If an ARC buffer is written (and dirtied) which also exists in the
3835 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3837 * The performance of the L2ARC can be tweaked by a number of tunables, which
3838 * may be necessary for different workloads:
3840  *	l2arc_write_max		max write bytes per interval
3841  *	l2arc_write_boost	extra write bytes during device warmup
3842  *	l2arc_noprefetch	skip caching prefetched buffers
3843  *	l2arc_headroom		number of max device writes to precache
3844  *	l2arc_feed_secs		seconds between L2ARC writing
3846 * Tunables may be removed or added as future performance improvements are
3847 * integrated, and also may become zpool properties.
3849 * There are three key functions that control how the L2ARC warms up:
3851  *	l2arc_write_eligible()	check if a buffer is eligible to cache
3852  *	l2arc_write_size()	calculate how much to write
3853  *	l2arc_write_interval()	calculate sleep delay between writes
3855 * These three functions determine what to write, how much, and how quickly
3860 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3863      * A buffer is *not* eligible for the L2ARC if it:
3864      * 1. belongs to a different spa.
3865      * 2. is already cached on the L2ARC.
3866      * 3. has an I/O in progress (it may be an incomplete read).
3867      * 4. is flagged not eligible (zfs property).
3869     if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3870         HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3877 l2arc_write_size(l2arc_dev_t *dev)
3881     size = dev->l2ad_write;
3883     if (arc_warm == B_FALSE)
3884         size += dev->l2ad_boost;
3891 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3893     clock_t interval, next, now;
3896      * If the ARC lists are busy, increase our write rate; if the
3897      * lists are stale, idle back.  This is achieved by checking
3898      * how much we previously wrote - if it was more than half of
3899      * what we wanted, schedule the next write much sooner.
3901     if (l2arc_feed_again && wrote > (wanted / 2))
3902         interval = (hz * l2arc_feed_min_ms) / 1000;
3904         interval = hz * l2arc_feed_secs;
3906     now = ddi_get_lbolt();
3907     next = MAX(now, MIN(now + interval, began + interval));
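/*
 * Example of the pacing above, assuming the default l2arc_feed_secs of
 * 1 and l2arc_feed_min_ms of 200: a cycle that wrote more than half of
 * what it wanted re-arms after 200ms, otherwise after a full second.
 * Because the interval is measured from `began', time spent issuing
 * the previous write counts against the delay, and a cycle that
 * overran its interval fires again immediately (next = now).
 */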
3913 l2arc_hdr_stat_add(void)
3915 ARCSTAT_INCR(arcstat_l2_hdr_size
, HDR_SIZE
+ L2HDR_SIZE
);
3916 ARCSTAT_INCR(arcstat_hdr_size
, -HDR_SIZE
);
3920 l2arc_hdr_stat_remove(void)
3922 ARCSTAT_INCR(arcstat_l2_hdr_size
, -(HDR_SIZE
+ L2HDR_SIZE
));
3923 ARCSTAT_INCR(arcstat_hdr_size
, HDR_SIZE
);
3927 * Cycle through L2ARC devices. This is how L2ARC load balances.
3928 * If a device is returned, this also returns holding the spa config lock.
3930 static l2arc_dev_t
*
3931 l2arc_dev_get_next(void)
3933 l2arc_dev_t
*first
, *next
= NULL
;
3936 * Lock out the removal of spas (spa_namespace_lock), then removal
3937 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3938 * both locks will be dropped and a spa config lock held instead.
3940 mutex_enter(&spa_namespace_lock
);
3941 mutex_enter(&l2arc_dev_mtx
);
3943 /* if there are no vdevs, there is nothing to do */
3944 if (l2arc_ndev
== 0)
3948 next
= l2arc_dev_last
;
3950 /* loop around the list looking for a non-faulted vdev */
3952 next
= list_head(l2arc_dev_list
);
3954 next
= list_next(l2arc_dev_list
, next
);
3956 next
= list_head(l2arc_dev_list
);
3959 /* if we have come back to the start, bail out */
3962 else if (next
== first
)
3965 } while (vdev_is_dead(next
->l2ad_vdev
));
3967 /* if we were unable to find any usable vdevs, return NULL */
3968 if (vdev_is_dead(next
->l2ad_vdev
))
3971 l2arc_dev_last
= next
;
3974 mutex_exit(&l2arc_dev_mtx
);
3977 * Grab the config lock to prevent the 'next' device from being
3978 * removed while we are writing to it.
3981 spa_config_enter(next
->l2ad_spa
, SCL_L2ARC
, next
, RW_READER
);
3982 mutex_exit(&spa_namespace_lock
);
/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *ab, *ab_prev;
	l2arc_buf_hdr_t *abl2;
	kmutex_t *hash_lock;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&l2arc_buflist_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, ab);
			abl2 = ab->b_l2hdr;
			ab->b_l2hdr = NULL;
			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		ab->b_flags &= ~ARC_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	kmem_cache_free(hdr_cache, head);
	mutex_exit(&l2arc_buflist_mtx);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}
/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * Check this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = EIO;
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list = NULL;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);

	return (list);
}
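/*
 * Typical caller pattern (a sketch; the real loop is in
 * l2arc_write_buffers() below):
 *
 *	for (try = 0; try <= 3; try++) {
 *		list = l2arc_list_locked(try, &list_lock);
 *		... scan buffers on the list ...
 *		mutex_exit(list_lock);
 *	}
 */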
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	l2arc_buf_hdr_t *abl2;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = dev->l2ad_buflist;

	if (buflist == NULL)
		return;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&l2arc_buflist_mtx);
	for (ab = list_tail(buflist); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&l2arc_buflist_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(ab)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, ab);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && ab->b_l2hdr != NULL &&
		    (ab->b_l2hdr->b_daddr > taddr ||
		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (HDR_FREE_IN_PROGRESS(ab)) {
			/*
			 * Already on the path to destruction.
			 */
			mutex_exit(hash_lock);
			continue;
		}

		if (ab->b_state == arc_l2c_only) {
			ASSERT(!HDR_L2_READING(ab));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, ab, hash_lock);
			arc_hdr_destroy(ab);
		} else {
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(ab)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				ab->b_flags |= ARC_L2_EVICTED;
			}

			/*
			 * Tell ARC this no longer exists in L2ARC.
			 */
			if (ab->b_l2hdr != NULL) {
				abl2 = ab->b_l2hdr;
				ab->b_l2hdr = NULL;
				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
			}
			list_remove(buflist, ab);

			/*
			 * This may have been leftover after a
			 * failed write.
			 */
			ab->b_flags &= ~ARC_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&l2arc_buflist_mtx);

	vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
	dev->l2ad_evict = taddr;
}
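/*
 * Worked example for the target address above: with l2ad_hand at 900MB,
 * l2ad_end at 1000MB and distance = 64MB, the hand is within 2 * 64MB of
 * the end, so taddr = l2ad_end and the sweep clears through to the end of
 * the device before the hand wraps; with the hand at 500MB instead,
 * taddr = 500MB + 64MB = 564MB.
 */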
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock = NULL;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_guid(spa);
	int try;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			hdrl2->b_dev = dev;
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return (0);
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		vdev_space_update(dev->l2ad_vdev,
		    dev->l2ad_end - dev->l2ad_hand, 0, 0);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_sz);
}
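/*
 * Worked example for the headroom cutoff above (assuming the default
 * l2arc_headroom of 2): with a 16MB target_sz, each list scan gives up
 * once 32MB worth of buffers have been passed over, so a feed never walks
 * arbitrarily deep into a cold list.
 */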
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_interruptible(&l2arc_feed_thr_cv,
		    &l2arc_feed_thr_lock, next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size(dev);

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;
	list_link_init(&adddev->l2ad_node);
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
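/*
 * Worked example for the usable region above: VDEV_LABEL_START_SIZE skips
 * the front label area (two 256K labels plus the 3.5MB boot block, i.e.
 * 4MB), so a cache vdev's rotor runs from l2ad_start = 4MB up to
 * l2ad_end = 4MB + vdev_get_min_asize(vd).
 */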
/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}
void
l2arc_stop(void)
{
	if (!(spa_mode_global & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_remove_ref);
EXPORT_SYMBOL(arc_getbuf_func);

module_param(zfs_arc_min, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_min, "Min arc size");

module_param(zfs_arc_max, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_max, "Max arc size");

module_param(zfs_arc_meta_limit, ulong, 0444);
MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");

module_param(zfs_arc_reduce_dnlc_percent, int, 0444);
MODULE_PARM_DESC(zfs_arc_reduce_dnlc_percent, "Meta reclaim percentage");

module_param(zfs_arc_grow_retry, int, 0444);
MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");

module_param(zfs_arc_shrink_shift, int, 0444);
MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");

module_param(zfs_arc_p_min_shift, int, 0444);
MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");

module_param(l2arc_write_max, ulong, 0444);
MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");

module_param(l2arc_write_boost, ulong, 0444);
MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup");

module_param(l2arc_headroom, ulong, 0444);
MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes to precache");

module_param(l2arc_feed_secs, ulong, 0444);
MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing");

module_param(l2arc_feed_min_ms, ulong, 0444);
MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds");

module_param(l2arc_noprefetch, int, 0444);
MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers");

module_param(l2arc_feed_again, int, 0444);
MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup");

module_param(l2arc_norw, int, 0444);
MODULE_PARM_DESC(l2arc_norw, "No reads during writes");
#endif