/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2020, Delphix. All rights reserved.
 * Copyright (c) 2014, Saso Kiselkov. All rights reserved.
 * Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
 * Copyright (c) 2020, George Amanakis. All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 * Copyright (c) 2020, The FreeBSD Foundation [1]
 *
 * [1] Portions of this software were developed by Allan Jude
 *     under sponsorship from the FreeBSD Foundation.
 */
/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
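/*
 * For illustration only (not part of the upstream code): a minimal sketch of
 * the variable-size eviction idea from item 3 above, assuming a hypothetical
 * evictable list ordered from most to least recently used and hypothetical
 * block_t/evict() helpers.
 *
 *	uint64_t freed = 0;
 *	while (freed < new_block_size) {
 *		block_t *b = tail_of_evictable_list();	// "lowest" evictable block
 *		if (b == NULL)
 *			break;				// nothing evictable: throttle new data
 *		freed += b->size;			// blocks range from 512B to 128KB
 *		evict(b);
 *	}
 */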
/*
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal ARC algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * ARC list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each ARC state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an ARC list lock you
 * must use: mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 */
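/*
 * For illustration only (not part of the upstream code): the typical lookup
 * pattern implied above. buf_hash_find() returns with the hash mutex held on
 * a hit, so the caller is responsible for dropping it:
 *
 *	kmutex_t *hash_lock = NULL;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		// ... inspect or update hdr fields protected by the hash lock ...
 *		mutex_exit(hash_lock);
 *	}
 */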
/*
 * It is also possible to register a callback which is run when the
 * metadata limit is reached and no buffers can be safely evicted. In
 * this case the arc user should drop a reference on some arc buffers so
 * they can be reclaimed. For example, when using the ZPL each dentry
 * holds a reference on a znode. These dentries must be pruned before
 * the arc buffer holding the znode can be safely evicted.
 */
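/*
 * For illustration only (hypothetical consumer, not part of this file): a
 * filesystem layer can register such a callback with
 * arc_add_prune_callback(). The exact callback prototype lives in sys/arc.h;
 * the sketch below only shows the shape of the mechanism.
 *
 *	static void
 *	myfs_prune(uint64_t nr_to_scan, void *priv)
 *	{
 *		// drop cached dentry/znode references so their ARC
 *		// buffers become evictable
 *	}
 *
 *	arc_prune_t *ap = arc_add_prune_callback(myfs_prune, myfs_data);
 *	...
 *	arc_remove_prune_callback(ap);
 */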
/*
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
/*
 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
 * This structure can point either to a block that is still in the cache or to
 * one that is only accessible in an L2 ARC device, or it can provide
 * information about a block that was recently evicted. If a block is
 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
 * information to retrieve it from the L2ARC device. This information is
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
 * that is in this state cannot access the data directly.
 *
 * Blocks that are actively being referenced or have not been evicted
 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
 * the arc_buf_hdr_t that will point to the data block in memory. A block can
 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
 *
 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
 * ability to store the physical data (b_pabd) associated with the DVA of the
 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 * it will match its on-disk compression characteristics. This behavior can be
 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
 * compressed ARC functionality is disabled, the b_pabd will point to an
 * uncompressed version of the on-disk data.
 *
 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
 * consumer. The ARC will provide references to this data and will keep it
 * cached until it is no longer in use. The ARC caches only the L1ARC's
 * physical data block and will evict any arc_buf_t that is no longer
 * referenced. The amount of memory consumed by the arc_buf_ts' data buffers
 * can be seen via the "overhead_size" kstat.
 *
 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
 * compressed form. The typical case is that consumers will want uncompressed
 * data, and when that happens a new data buffer is allocated where the data is
 * decompressed for them to use. Currently the only consumer who wants
 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
 * with the arc_buf_hdr_t.
 *
 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's.
 * The first one is owned by a compressed send consumer (and therefore
 * references the same compressed data buffer as the arc_buf_hdr_t) and the
 * second could be used by any other consumer (and has its own uncompressed
 * copy of the data buffer).
 *
 * [diagram elided: the arc_buf_hdr_t's b_buf points to the first arc_buf_t,
 * whose b_data shares the hdr's compressed b_pabd (b_comp = T); the second
 * arc_buf_t in the b_next chain (b_comp = F) has its own uncompressed copy
 * of the data, and its b_next is NULL]
 */
/*
 * When a consumer reads a block, the ARC must first look to see if the
 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
 * arc_buf_t and either copies uncompressed data into a new data buffer from an
 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
 * hdr is compressed and the desired compression characteristics of the
 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
 * the last buffer in the hdr's b_buf list, however a shared compressed buf can
 * be anywhere in the hdr's list.
 */
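/*
 * For illustration only (simplified pseudo-C, not the actual implementation;
 * the real logic lives in arc_buf_alloc_impl()/arc_buf_fill()):
 *
 *	if (consumer wants compressed data && hdr's b_pabd is compressed)
 *		share b_pabd with the new arc_buf_t;
 *	else if (another arc_buf_t on this hdr already holds uncompressed data)
 *		copy from that buffer;
 *	else if (hdr's b_pabd is compressed)
 *		decompress b_pabd into a newly allocated buffer;
 *	else
 *		share (or copy) the uncompressed b_pabd, keeping any shared
 *		uncompressed buf last in the hdr's b_buf list;
 */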
/*
 * The diagram below shows an example of an uncompressed ARC hdr that is
 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
 * the last element in the buf list):
 *
 * [diagram elided: the hdr's b_pabd and the last arc_buf_t's b_data point to
 * the same uncompressed buffer, while the first arc_buf_t in the b_next chain
 * holds its own copy of the data]
 */
/*
 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
 * since the physical block is about to be rewritten. The new data contents
 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
 * it may compress the data before writing it to disk. The ARC will be called
 * with the transformed data and will memcpy the transformed on-disk block into
 * a newly allocated b_pabd. Writes are always done into buffers which have
 * either been loaned (and hence are new and don't have other readers) or
 * buffers which have been released (and hence have their own hdr, if there
 * were originally other readers of the buf's original hdr). This ensures that
 * the ARC only needs to update a single buf and its hdr after a write occurs.
 *
 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
 * that when compressed ARC is enabled that the L2ARC blocks are identical
 * to the on-disk block in the main data pool. This provides a significant
 * advantage since the ARC can leverage the bp's checksum when reading from the
 * L2ARC to determine if the contents are valid. However, if the compressed
 * ARC is disabled, then the L2ARC's block must be transformed to look
 * like the physical block in the main data pool before comparing the
 * checksum and determining its validity.
 */
/*
 * The L1ARC has a slightly different system for storing encrypted data.
 * Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
 * possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
 * The other difference is that encryption cannot be treated as a suggestion.
 * If a caller would prefer compressed data, but they actually wind up with
 * uncompressed data the worst thing that could happen is there might be a
 * performance hit. If the caller requests encrypted data, however, we must be
 * sure they actually get it or else secret information could be leaked. Raw
 * data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
 * may have both an encrypted version and a decrypted version of its data at
 * once. When a caller needs a raw arc_buf_t, it is allocated and the data is
 * copied out of this header. To avoid complications with b_pabd, raw buffers
 * cannot be shared.
 */
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;

/*
 * This thread's job is to keep enough free memory in the system, by
 * calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
 * arc_available_memory().
 */
static zthr_t *arc_reap_zthr;

/*
 * This thread's job is to keep arc_size under arc_c, by calling
 * arc_evict(), which improves arc_is_overflowing().
 */
static zthr_t *arc_evict_zthr;
static arc_buf_hdr_t **arc_state_evict_markers;
static int arc_state_evict_marker_count;

static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
static clock_t arc_last_uncached_flush;

/*
 * Count of bytes evicted since boot.
 */
static uint64_t arc_evict_count;

/*
 * List of arc_evict_waiter_t's, representing threads waiting for the
 * arc_evict_count to reach specific values.
 */
static list_t arc_evict_waiters;
/*
 * When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
 * the requested amount of data to be evicted. For example, by default for
 * every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
 * Since this is above 100%, it ensures that progress is made towards getting
 * arc_size under arc_c. Since this is finite, it ensures that allocations
 * can still happen, even during the potentially long time that arc_size is
 * more than arc_c.
 */
static uint_t zfs_arc_eviction_pct = 200;
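/*
 * For illustration only: with the default zfs_arc_eviction_pct of 200, a
 * thread that needs 64KB while the ARC is overflowing waits until
 * 64KB * 200 / 100 = 128KB has been evicted, so each allocation retires twice
 * as much data as it adds. A hypothetical helper showing the arithmetic:
 *
 *	static uint64_t
 *	arc_eviction_wait_bytes(uint64_t alloc_size)
 *	{
 *		return (alloc_size * zfs_arc_eviction_pct / 100);
 *	}
 */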
/*
 * The number of headers to evict in arc_evict_state_impl() before
 * dropping the sublist lock and evicting from another sublist. A lower
 * value means we're more likely to evict the "correct" header (i.e. the
 * oldest header in the arc state), but comes with higher overhead
 * (i.e. more invocations of arc_evict_state_impl()).
 */
static uint_t zfs_arc_evict_batch_limit = 10;

/* number of seconds before growing cache again */
uint_t arc_grow_retry = 5;

/*
 * Minimum time between calls to arc_kmem_reap_soon().
 */
static const int arc_kmem_cache_reap_retry_ms = 1000;

/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
static int zfs_arc_overflow_shift = 8;

/* log2(fraction of arc to reclaim) */
uint_t arc_shrink_shift = 7;

/* percent of pagecache to reclaim arc to */
uint_t zfs_arc_pc_percent = 0;
/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
uint_t arc_no_grow_shift = 5;
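/*
 * For illustration only: with arc_no_grow_shift = 5, the "must be free"
 * threshold is arc_c / 32. E.g. for arc_c = 4 GiB the ARC only grows while at
 * least 128 MiB (4 GiB >> 5) is free; with the default arc_shrink_shift of 7 a
 * shrink step removes arc_c / 128 = 32 MiB, keeping shrinking finer grained
 * than the no-grow threshold, as required above.
 */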
/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static uint_t arc_min_prefetch_ms;
static uint_t arc_min_prescient_prefetch_ms;

/*
 * If this percent of memory is free, don't throttle.
 */
uint_t arc_lotsfree_percent = 10;
/*
 * The arc has filled available memory and has now warmed up.
 */
boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max = 0;
uint64_t zfs_arc_min = 0;
static uint64_t zfs_arc_dnode_limit = 0;
static uint_t zfs_arc_dnode_reduce_percent = 10;
static uint_t zfs_arc_grow_retry = 0;
static uint_t zfs_arc_shrink_shift = 0;
uint_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
 * ARC dirty data constraints for arc_tempreserve_space() throttle:
 * * total dirty data limit
 * * anon block dirty limit
 * * each pool's anon allowance
 */
static const unsigned long zfs_arc_dirty_limit_percent = 50;
static const unsigned long zfs_arc_anon_limit_percent = 25;
static const unsigned long zfs_arc_pool_dirty_percent = 20;

/*
 * Enable or disable compressed arc buffers.
 */
int zfs_compressed_arc_enabled = B_TRUE;

/*
 * Balance between metadata and data on ghost hits. Values above 100
 * increase metadata caching by proportionally reducing effect of ghost
 * data hits on target data/metadata rate.
 */
static uint_t zfs_arc_meta_balance = 500;

/*
 * Percentage that can be consumed by dnodes of ARC meta buffers.
 */
static uint_t zfs_arc_dnode_limit_percent = 10;

/*
 * These tunables are Linux-specific
 */
static uint64_t zfs_arc_sys_free = 0;
static uint_t zfs_arc_min_prefetch_ms = 0;
static uint_t zfs_arc_min_prescient_prefetch_ms = 0;
static uint_t zfs_arc_lotsfree_percent = 10;

/*
 * Number of arc_prune threads
 */
static int zfs_arc_prune_task_threads = 1;
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_state_t ARC_uncached;
arc_stats_t arc_stats = {
	{ "hits",	KSTAT_DATA_UINT64 },
	{ "iohits",	KSTAT_DATA_UINT64 },
	{ "misses",	KSTAT_DATA_UINT64 },
	{ "demand_data_hits",	KSTAT_DATA_UINT64 },
	{ "demand_data_iohits",	KSTAT_DATA_UINT64 },
	{ "demand_data_misses",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_iohits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_iohits",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_iohits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",	KSTAT_DATA_UINT64 },
	{ "mfu_hits",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",	KSTAT_DATA_UINT64 },
	{ "uncached_hits",	KSTAT_DATA_UINT64 },
	{ "deleted",	KSTAT_DATA_UINT64 },
	{ "mutex_miss",	KSTAT_DATA_UINT64 },
	{ "access_skip",	KSTAT_DATA_UINT64 },
	{ "evict_skip",	KSTAT_DATA_UINT64 },
	{ "evict_not_enough",	KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",	KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",	KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible_mfu",	KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible_mru",	KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "evict_l2_skip",	KSTAT_DATA_UINT64 },
	{ "hash_elements",	KSTAT_DATA_UINT64 },
	{ "hash_elements_max",	KSTAT_DATA_UINT64 },
	{ "hash_collisions",	KSTAT_DATA_UINT64 },
	{ "hash_chains",	KSTAT_DATA_UINT64 },
	{ "hash_chain_max",	KSTAT_DATA_UINT64 },
	{ "meta",	KSTAT_DATA_UINT64 },
	{ "pd",	KSTAT_DATA_UINT64 },
	{ "pm",	KSTAT_DATA_UINT64 },
	{ "c",	KSTAT_DATA_UINT64 },
	{ "c_min",	KSTAT_DATA_UINT64 },
	{ "c_max",	KSTAT_DATA_UINT64 },
	{ "size",	KSTAT_DATA_UINT64 },
	{ "compressed_size",	KSTAT_DATA_UINT64 },
	{ "uncompressed_size",	KSTAT_DATA_UINT64 },
	{ "overhead_size",	KSTAT_DATA_UINT64 },
	{ "hdr_size",	KSTAT_DATA_UINT64 },
	{ "data_size",	KSTAT_DATA_UINT64 },
	{ "metadata_size",	KSTAT_DATA_UINT64 },
	{ "dbuf_size",	KSTAT_DATA_UINT64 },
	{ "dnode_size",	KSTAT_DATA_UINT64 },
	{ "bonus_size",	KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
	{ "other_size",	KSTAT_DATA_UINT64 },
#endif
	{ "anon_size",	KSTAT_DATA_UINT64 },
	{ "anon_data",	KSTAT_DATA_UINT64 },
	{ "anon_metadata",	KSTAT_DATA_UINT64 },
	{ "anon_evictable_data",	KSTAT_DATA_UINT64 },
	{ "anon_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_size",	KSTAT_DATA_UINT64 },
	{ "mru_data",	KSTAT_DATA_UINT64 },
	{ "mru_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mru_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_size",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_data",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_size",	KSTAT_DATA_UINT64 },
	{ "mfu_data",	KSTAT_DATA_UINT64 },
	{ "mfu_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mfu_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_size",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_data",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "uncached_size",	KSTAT_DATA_UINT64 },
	{ "uncached_data",	KSTAT_DATA_UINT64 },
	{ "uncached_metadata",	KSTAT_DATA_UINT64 },
	{ "uncached_evictable_data",	KSTAT_DATA_UINT64 },
	{ "uncached_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "l2_hits",	KSTAT_DATA_UINT64 },
	{ "l2_misses",	KSTAT_DATA_UINT64 },
	{ "l2_prefetch_asize",	KSTAT_DATA_UINT64 },
	{ "l2_mru_asize",	KSTAT_DATA_UINT64 },
	{ "l2_mfu_asize",	KSTAT_DATA_UINT64 },
	{ "l2_bufc_data_asize",	KSTAT_DATA_UINT64 },
	{ "l2_bufc_metadata_asize",	KSTAT_DATA_UINT64 },
	{ "l2_feeds",	KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",	KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",	KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",	KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",	KSTAT_DATA_UINT64 },
	{ "l2_writes_done",	KSTAT_DATA_UINT64 },
	{ "l2_writes_error",	KSTAT_DATA_UINT64 },
	{ "l2_writes_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",	KSTAT_DATA_UINT64 },
	{ "l2_evict_l1cached",	KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",	KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",	KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",	KSTAT_DATA_UINT64 },
	{ "l2_io_error",	KSTAT_DATA_UINT64 },
	{ "l2_size",	KSTAT_DATA_UINT64 },
	{ "l2_asize",	KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",	KSTAT_DATA_UINT64 },
	{ "l2_log_blk_writes",	KSTAT_DATA_UINT64 },
	{ "l2_log_blk_avg_asize",	KSTAT_DATA_UINT64 },
	{ "l2_log_blk_asize",	KSTAT_DATA_UINT64 },
	{ "l2_log_blk_count",	KSTAT_DATA_UINT64 },
	{ "l2_data_to_meta_ratio",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_success",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_unsupported",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_io_errors",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_dh_errors",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_cksum_lb_errors",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_lowmem",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_size",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_asize",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_bufs",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_bufs_precached",	KSTAT_DATA_UINT64 },
	{ "l2_rebuild_log_blks",	KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "memory_direct_count",	KSTAT_DATA_UINT64 },
	{ "memory_indirect_count",	KSTAT_DATA_UINT64 },
	{ "memory_all_bytes",	KSTAT_DATA_UINT64 },
	{ "memory_free_bytes",	KSTAT_DATA_UINT64 },
	{ "memory_available_bytes",	KSTAT_DATA_INT64 },
	{ "arc_no_grow",	KSTAT_DATA_UINT64 },
	{ "arc_tempreserve",	KSTAT_DATA_UINT64 },
	{ "arc_loaned_bytes",	KSTAT_DATA_UINT64 },
	{ "arc_prune",	KSTAT_DATA_UINT64 },
	{ "arc_meta_used",	KSTAT_DATA_UINT64 },
	{ "arc_dnode_limit",	KSTAT_DATA_UINT64 },
	{ "async_upgrade_sync",	KSTAT_DATA_UINT64 },
	{ "predictive_prefetch",	KSTAT_DATA_UINT64 },
	{ "demand_hit_predictive_prefetch",	KSTAT_DATA_UINT64 },
	{ "demand_iohit_predictive_prefetch",	KSTAT_DATA_UINT64 },
	{ "prescient_prefetch",	KSTAT_DATA_UINT64 },
	{ "demand_hit_prescient_prefetch",	KSTAT_DATA_UINT64 },
	{ "demand_iohit_prescient_prefetch",	KSTAT_DATA_UINT64 },
	{ "arc_need_free",	KSTAT_DATA_UINT64 },
	{ "arc_sys_free",	KSTAT_DATA_UINT64 },
	{ "arc_raw_size",	KSTAT_DATA_UINT64 },
	{ "cached_only_in_progress",	KSTAT_DATA_UINT64 },
	{ "abd_chunk_waste_size",	KSTAT_DATA_UINT64 },
};
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))\
		continue;						\
}
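/*
 * For illustration only: ARCSTAT_MAX() is a lock-free "store the maximum"
 * update. It is used later in this file, e.g. to track the longest hash
 * chain seen while inserting a header:
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 *
 * The compare-and-swap loop retries only while another thread races in with
 * a value smaller than ours.
 */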
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
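/*
 * For illustration only: a typical invocation (matching the hit/miss kstat
 * names above) breaks a hit down by demand vs. prefetch and data vs.
 * metadata in one statement:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */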
/*
 * This macro allows us to use kstats as floating averages. Each time we
 * update this kstat, we first factor it and the update value by
 * ARCSTAT_F_AVG_FACTOR to shrink the new value's contribution to the overall
 * average. This macro assumes that integer loads and stores are atomic, but
 * is not safe for multiple writers updating the kstat in parallel (only the
 * last writer's update will remain).
 */
#define	ARCSTAT_F_AVG_FACTOR	3
#define	ARCSTAT_F_AVG(stat, value) \
	do { \
		uint64_t x = ARCSTAT(stat); \
		x = x - x / ARCSTAT_F_AVG_FACTOR + \
		    (value) / ARCSTAT_F_AVG_FACTOR; \
		ARCSTAT(stat) = x; \
	} while (0)
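/*
 * For illustration only: the update above is the exponential moving average
 *
 *	new = old - old / ARCSTAT_F_AVG_FACTOR + value / ARCSTAT_F_AVG_FACTOR
 *
 * i.e. each new sample contributes one third of its weight. For example,
 * with old = 900 and value = 300 the kstat becomes 900 - 300 + 100 = 700.
 */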
static kstat_t *arc_ksp;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_tempreserve	ARCSTAT(arcstat_tempreserve)
#define	arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
#define	arc_dnode_limit	ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define	arc_need_free	ARCSTAT(arcstat_need_free) /* waiting to be evicted */

hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)
#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define	HDR_PRESCIENT_PREFETCH(hdr)	\
	((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define	HDR_COMPRESSION_ENABLED(hdr)	\
	((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)

#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define	HDR_UNCACHED(hdr)	((hdr)->b_flags & ARC_FLAG_UNCACHED)
#define	HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define	HDR_PROTECTED(hdr)	((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define	HDR_NOAUTH(hdr)		((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define	HDR_SHARED_DATA(hdr)	((hdr)->b_flags & ARC_FLAG_SHARED_DATA)

#define	HDR_ISTYPE_METADATA(hdr)	\
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define	HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))

#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define	HDR_HAS_RABD(hdr)	\
	(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) &&	\
	(hdr)->b_crypt_hdr.b_rabd != NULL)
#define	HDR_ENCRYPTED(hdr)	\
	(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define	HDR_AUTHENTICATED(hdr)	\
	(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))

/* For storing compression mode in b_flags */
#define	HDR_COMPRESS_OFFSET	(highbit64(ARC_FLAG_COMPRESS_0) - 1)

#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define	HDR_SET_COMPRESS(hdr, cmp)	BF32_SET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));

#define	ARC_BUF_LAST(buf)	((buf)->b_next == NULL)
#define	ARC_BUF_SHARED(buf)	((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define	ARC_BUF_COMPRESSED(buf)	((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define	ARC_BUF_ENCRYPTED(buf)	((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)

#define	HDR_FULL_CRYPT_SIZE	((int64_t)sizeof (arc_buf_hdr_t))
#define	HDR_FULL_SIZE		((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define	HDR_L2ONLY_SIZE		((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
 * Hash table routines
 */
#define	BUF_LOCKS 2048
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
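/*
 * For illustration only: a header's identity (spa guid, DVA, birth txg) is
 * hashed into one of ht_mask+1 buckets, and the bucket index picks one of the
 * BUF_LOCKS mutexes, so many buckets share each lock:
 *
 *	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
 *	kmutex_t *lock = BUF_HASH_LOCK(idx);	// same as HDR_LOCK(hdr)
 *	mutex_enter(lock);
 *	// ... hdr fields protected by the hash lock ...
 *	mutex_exit(lock);
 */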
uint64_t zfs_crc64_table[256];
#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */

/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

/*
 * We can feed L2ARC from two states of ARC buffers, mru and mfu,
 * and each of the state has two types: data and metadata.
 */
#define	L2ARC_FEED_TYPES	4

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* def max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra warmup write */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* # of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
int l2arc_noprefetch = B_TRUE;			/* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE;			/* turbo warmup */
int l2arc_norw = B_FALSE;			/* no reads during writes */
static uint_t l2arc_meta_percent = 33;		/* limit on headers size */
static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */
typedef struct l2arc_read_callback {
	arc_buf_hdr_t		*l2rcb_hdr;	/* read header */
	blkptr_t		l2rcb_bp;	/* original blkptr */
	zbookmark_phys_t	l2rcb_zb;	/* original bookmark */
	int			l2rcb_flags;	/* original flags */
	abd_t			*l2rcb_abd;	/* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	abd_t			*l2df_abd;
	size_t			l2df_size;
	arc_buf_contents_t	l2df_type;
	list_node_t		l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
	ARC_FILL_LOCKED		= 1 << 0, /* hdr lock is held */
	ARC_FILL_COMPRESSED	= 1 << 1, /* fill with compressed data */
	ARC_FILL_ENCRYPTED	= 1 << 2, /* fill with encrypted data */
	ARC_FILL_NOAUTH		= 1 << 3, /* don't attempt to authenticate */
	ARC_FILL_IN_PLACE	= 1 << 4  /* fill in place (special case) */
} arc_fill_flags_t;

typedef enum arc_ovf_level {
	ARC_OVF_NONE,		/* ARC within target size. */
	ARC_OVF_SOME,		/* ARC is slightly overflowed. */
	ARC_OVF_SEVERE		/* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;

enum arc_hdr_alloc_flags {
	ARC_HDR_ALLOC_RDATA	= 0x1,
	ARC_HDR_USE_RESERVE	= 0x4,
	ARC_HDR_ALLOC_LINEAR	= 0x8,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, const void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, const void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, const void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, const void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, const void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size,
    const void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_hdr_destroy(arc_buf_hdr_t *);
static void arc_access(arc_buf_hdr_t *, arc_flags_t, boolean_t);
static void arc_buf_watch(arc_buf_t *);
static void arc_change_state(arc_state_t *, arc_buf_hdr_t *);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
    boolean_t state_only);

#define	l2arc_hdr_arcstats_increment(hdr) \
	l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define	l2arc_hdr_arcstats_decrement(hdr) \
	l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define	l2arc_hdr_arcstats_increment_state(hdr) \
	l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define	l2arc_hdr_arcstats_decrement_state(hdr) \
	l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
 * l2arc_exclude_special : A zfs module parameter that controls whether buffers
 *		present on special vdevs are eligible for caching in L2ARC. If
 *		set to 1, exclude dbufs on special vdevs from being cached to
 *		L2ARC.
 */
int l2arc_exclude_special = 0;

/*
 * l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
 *		metadata and data are cached from ARC into L2ARC.
 */
static int l2arc_mfuonly = 0;
/*
 * l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
 *		the current write size (l2arc_write_max) we should TRIM if we
 *		have filled the device. It is defined as a percentage of the
 *		write size. If set to 100 we trim twice the space required to
 *		accommodate upcoming writes. A minimum of 64MB will be trimmed.
 *		It also enables TRIM of the whole L2ARC device upon creation or
 *		addition to an existing pool or if the header of the device is
 *		invalid upon importing a pool or onlining a cache device. The
 *		default is 0, which disables TRIM on L2ARC altogether as it can
 *		put significant stress on the underlying storage devices. This
 *		will vary depending on how well the specific device handles
 *		these requests.
 */
static uint64_t l2arc_trim_ahead = 0;
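/*
 * For illustration only: with l2arc_write_max = 8MB and l2arc_trim_ahead =
 * 100, a feed cycle on a full device would trim 8MB + 8MB * 100 / 100 = 16MB
 * of space ahead of the write hand; since that is below the 64MB floor
 * described above, 64MB would actually be trimmed.
 */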
/*
 * Performance tuning of L2ARC persistence:
 *
 * l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
 *		an L2ARC device (either at pool import or later) will attempt
 *		to rebuild L2ARC buffer contents.
 * l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
 *		whether log blocks are written to the L2ARC device. If the L2ARC
 *		device is less than 1GB, the amount of data l2arc_evict()
 *		evicts is significant compared to the amount of restored L2ARC
 *		data. In this case do not write log blocks in L2ARC in order
 *		not to waste space.
 */
static int l2arc_rebuild_enabled = B_TRUE;
static uint64_t l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static __attribute__((noreturn)) void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);

/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
    const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
    l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
    zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
    const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);

/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
    const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
    l2arc_dev_t *dev);

/* L2ARC persistence write I/O routines. */
static uint64_t l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
    l2arc_write_callback_t *cb);

/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
    const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
    const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
    uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
    l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define	HDR_EMPTY(hdr)						\
	((hdr)->b_dva.dva_word[0] == 0 &&			\
	(hdr)->b_dva.dva_word[1] == 0)

#define	HDR_EMPTY_OR_LOCKED(hdr)				\
	(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))

#define	HDR_EQUAL(spa, dva, birth, hdr)				\
	((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
}
1012 static arc_buf_hdr_t
*
1013 buf_hash_find(uint64_t spa
, const blkptr_t
*bp
, kmutex_t
**lockp
)
1015 const dva_t
*dva
= BP_IDENTITY(bp
);
1016 uint64_t birth
= BP_PHYSICAL_BIRTH(bp
);
1017 uint64_t idx
= BUF_HASH_INDEX(spa
, dva
, birth
);
1018 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1021 mutex_enter(hash_lock
);
1022 for (hdr
= buf_hash_table
.ht_table
[idx
]; hdr
!= NULL
;
1023 hdr
= hdr
->b_hash_next
) {
1024 if (HDR_EQUAL(spa
, dva
, birth
, hdr
)) {
1029 mutex_exit(hash_lock
);
/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 * If lockp == NULL, the caller is assumed to already hold the hash lock.
 */
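/*
 * For illustration only: the pattern used by callers of buf_hash_insert().
 * A racing insert of the same block is detected by the non-NULL return:
 *
 *	kmutex_t *hash_lock = NULL;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		// somebody beat us to the hash insert; use 'exists' and
 *		// discard our header
 *	}
 *	...
 *	mutex_exit(hash_lock);
 */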
1041 static arc_buf_hdr_t
*
1042 buf_hash_insert(arc_buf_hdr_t
*hdr
, kmutex_t
**lockp
)
1044 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1045 kmutex_t
*hash_lock
= BUF_HASH_LOCK(idx
);
1046 arc_buf_hdr_t
*fhdr
;
1049 ASSERT(!DVA_IS_EMPTY(&hdr
->b_dva
));
1050 ASSERT(hdr
->b_birth
!= 0);
1051 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
1053 if (lockp
!= NULL
) {
1055 mutex_enter(hash_lock
);
1057 ASSERT(MUTEX_HELD(hash_lock
));
1060 for (fhdr
= buf_hash_table
.ht_table
[idx
], i
= 0; fhdr
!= NULL
;
1061 fhdr
= fhdr
->b_hash_next
, i
++) {
1062 if (HDR_EQUAL(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
, fhdr
))
1066 hdr
->b_hash_next
= buf_hash_table
.ht_table
[idx
];
1067 buf_hash_table
.ht_table
[idx
] = hdr
;
1068 arc_hdr_set_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1070 /* collect some hash table performance data */
1072 ARCSTAT_BUMP(arcstat_hash_collisions
);
1074 ARCSTAT_BUMP(arcstat_hash_chains
);
1076 ARCSTAT_MAX(arcstat_hash_chain_max
, i
);
1078 uint64_t he
= atomic_inc_64_nv(
1079 &arc_stats
.arcstat_hash_elements
.value
.ui64
);
1080 ARCSTAT_MAX(arcstat_hash_elements_max
, he
);
1086 buf_hash_remove(arc_buf_hdr_t
*hdr
)
1088 arc_buf_hdr_t
*fhdr
, **hdrp
;
1089 uint64_t idx
= BUF_HASH_INDEX(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
);
1091 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx
)));
1092 ASSERT(HDR_IN_HASH_TABLE(hdr
));
1094 hdrp
= &buf_hash_table
.ht_table
[idx
];
1095 while ((fhdr
= *hdrp
) != hdr
) {
1096 ASSERT3P(fhdr
, !=, NULL
);
1097 hdrp
= &fhdr
->b_hash_next
;
1099 *hdrp
= hdr
->b_hash_next
;
1100 hdr
->b_hash_next
= NULL
;
1101 arc_hdr_clear_flags(hdr
, ARC_FLAG_IN_HASH_TABLE
);
1103 /* collect some hash table performance data */
1104 atomic_dec_64(&arc_stats
.arcstat_hash_elements
.value
.ui64
);
1106 if (buf_hash_table
.ht_table
[idx
] &&
1107 buf_hash_table
.ht_table
[idx
]->b_hash_next
== NULL
)
1108 ARCSTAT_BUMPDOWN(arcstat_hash_chains
);
1112 * Global data structures and functions for the buf kmem cache.
1115 static kmem_cache_t
*hdr_full_cache
;
1116 static kmem_cache_t
*hdr_full_crypt_cache
;
1117 static kmem_cache_t
*hdr_l2only_cache
;
1118 static kmem_cache_t
*buf_cache
;
1123 #if defined(_KERNEL)
1125 * Large allocations which do not require contiguous pages
1126 * should be using vmem_free() in the linux kernel\
1128 vmem_free(buf_hash_table
.ht_table
,
1129 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
1131 kmem_free(buf_hash_table
.ht_table
,
1132 (buf_hash_table
.ht_mask
+ 1) * sizeof (void *));
1134 for (int i
= 0; i
< BUF_LOCKS
; i
++)
1135 mutex_destroy(BUF_HASH_LOCK(i
));
1136 kmem_cache_destroy(hdr_full_cache
);
1137 kmem_cache_destroy(hdr_full_crypt_cache
);
1138 kmem_cache_destroy(hdr_l2only_cache
);
1139 kmem_cache_destroy(buf_cache
);
1143 * Constructor callback - called when the cache is empty
1144 * and a new buf is requested.
1147 hdr_full_cons(void *vbuf
, void *unused
, int kmflag
)
1149 (void) unused
, (void) kmflag
;
1150 arc_buf_hdr_t
*hdr
= vbuf
;
1152 memset(hdr
, 0, HDR_FULL_SIZE
);
1153 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_NUMFUNCS
;
1154 zfs_refcount_create(&hdr
->b_l1hdr
.b_refcnt
);
1156 mutex_init(&hdr
->b_l1hdr
.b_freeze_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
1158 multilist_link_init(&hdr
->b_l1hdr
.b_arc_node
);
1159 list_link_init(&hdr
->b_l2hdr
.b_l2node
);
1160 arc_space_consume(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1166 hdr_full_crypt_cons(void *vbuf
, void *unused
, int kmflag
)
1169 arc_buf_hdr_t
*hdr
= vbuf
;
1171 hdr_full_cons(vbuf
, unused
, kmflag
);
1172 memset(&hdr
->b_crypt_hdr
, 0, sizeof (hdr
->b_crypt_hdr
));
1173 arc_space_consume(sizeof (hdr
->b_crypt_hdr
), ARC_SPACE_HDRS
);
1179 hdr_l2only_cons(void *vbuf
, void *unused
, int kmflag
)
1181 (void) unused
, (void) kmflag
;
1182 arc_buf_hdr_t
*hdr
= vbuf
;
1184 memset(hdr
, 0, HDR_L2ONLY_SIZE
);
1185 arc_space_consume(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1191 buf_cons(void *vbuf
, void *unused
, int kmflag
)
1193 (void) unused
, (void) kmflag
;
1194 arc_buf_t
*buf
= vbuf
;
1196 memset(buf
, 0, sizeof (arc_buf_t
));
1197 arc_space_consume(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1203 * Destructor callback - called when a cached buf is
1204 * no longer required.
1207 hdr_full_dest(void *vbuf
, void *unused
)
1210 arc_buf_hdr_t
*hdr
= vbuf
;
1212 ASSERT(HDR_EMPTY(hdr
));
1213 zfs_refcount_destroy(&hdr
->b_l1hdr
.b_refcnt
);
1215 mutex_destroy(&hdr
->b_l1hdr
.b_freeze_lock
);
1217 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
1218 arc_space_return(HDR_FULL_SIZE
, ARC_SPACE_HDRS
);
1222 hdr_full_crypt_dest(void *vbuf
, void *unused
)
1224 (void) vbuf
, (void) unused
;
1226 hdr_full_dest(vbuf
, unused
);
1227 arc_space_return(sizeof (((arc_buf_hdr_t
*)NULL
)->b_crypt_hdr
),
1232 hdr_l2only_dest(void *vbuf
, void *unused
)
1235 arc_buf_hdr_t
*hdr
= vbuf
;
1237 ASSERT(HDR_EMPTY(hdr
));
1238 arc_space_return(HDR_L2ONLY_SIZE
, ARC_SPACE_L2HDRS
);
1242 buf_dest(void *vbuf
, void *unused
)
1247 arc_space_return(sizeof (arc_buf_t
), ARC_SPACE_HDRS
);
1253 uint64_t *ct
= NULL
;
1254 uint64_t hsize
= 1ULL << 12;
	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
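	/*
	 * For illustration only: on a 64 GiB machine with the default
	 * zfs_arc_average_blocksize of 8 KiB, hsize grows to the first power
	 * of two >= 64 GiB / 8 KiB = 8M buckets, and the bucket array itself
	 * consumes 8M * sizeof (void *) = 64 MiB (i.e. ~1 MiB per GiB).
	 */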
	while (hsize * zfs_arc_average_blocksize < arc_all_memory())
		hsize <<= 1;

	buf_hash_table.ht_mask = hsize - 1;
1267 #if defined(_KERNEL)
1269 * Large allocations which do not require contiguous pages
1270 * should be using vmem_alloc() in the linux kernel
1272 buf_hash_table
.ht_table
=
1273 vmem_zalloc(hsize
* sizeof (void*), KM_SLEEP
);
1275 buf_hash_table
.ht_table
=
1276 kmem_zalloc(hsize
* sizeof (void*), KM_NOSLEEP
);
1278 if (buf_hash_table
.ht_table
== NULL
) {
1279 ASSERT(hsize
> (1ULL << 8));
1284 hdr_full_cache
= kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE
,
1285 0, hdr_full_cons
, hdr_full_dest
, NULL
, NULL
, NULL
, 0);
1286 hdr_full_crypt_cache
= kmem_cache_create("arc_buf_hdr_t_full_crypt",
1287 HDR_FULL_CRYPT_SIZE
, 0, hdr_full_crypt_cons
, hdr_full_crypt_dest
,
1288 NULL
, NULL
, NULL
, 0);
1289 hdr_l2only_cache
= kmem_cache_create("arc_buf_hdr_t_l2only",
1290 HDR_L2ONLY_SIZE
, 0, hdr_l2only_cons
, hdr_l2only_dest
, NULL
,
1292 buf_cache
= kmem_cache_create("arc_buf_t", sizeof (arc_buf_t
),
1293 0, buf_cons
, buf_dest
, NULL
, NULL
, NULL
, 0);
1295 for (i
= 0; i
< 256; i
++)
1296 for (ct
= zfs_crc64_table
+ i
, *ct
= i
, j
= 8; j
> 0; j
--)
1297 *ct
= (*ct
>> 1) ^ (-(*ct
& 1) & ZFS_CRC64_POLY
);
1299 for (i
= 0; i
< BUF_LOCKS
; i
++)
1300 mutex_init(BUF_HASH_LOCK(i
), NULL
, MUTEX_DEFAULT
, NULL
);
1303 #define ARC_MINTIME (hz>>4) /* 62 ms */
1306 * This is the size that the buf occupies in memory. If the buf is compressed,
1307 * it will correspond to the compressed size. You should use this method of
1308 * getting the buf size unless you explicitly need the logical size.
1311 arc_buf_size(arc_buf_t
*buf
)
1313 return (ARC_BUF_COMPRESSED(buf
) ?
1314 HDR_GET_PSIZE(buf
->b_hdr
) : HDR_GET_LSIZE(buf
->b_hdr
));
1318 arc_buf_lsize(arc_buf_t
*buf
)
1320 return (HDR_GET_LSIZE(buf
->b_hdr
));
1324 * This function will return B_TRUE if the buffer is encrypted in memory.
1325 * This buffer can be decrypted by calling arc_untransform().
1328 arc_is_encrypted(arc_buf_t
*buf
)
1330 return (ARC_BUF_ENCRYPTED(buf
) != 0);
1334 * Returns B_TRUE if the buffer represents data that has not had its MAC
1338 arc_is_unauthenticated(arc_buf_t
*buf
)
1340 return (HDR_NOAUTH(buf
->b_hdr
) != 0);
1344 arc_get_raw_params(arc_buf_t
*buf
, boolean_t
*byteorder
, uint8_t *salt
,
1345 uint8_t *iv
, uint8_t *mac
)
1347 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1349 ASSERT(HDR_PROTECTED(hdr
));
1351 memcpy(salt
, hdr
->b_crypt_hdr
.b_salt
, ZIO_DATA_SALT_LEN
);
1352 memcpy(iv
, hdr
->b_crypt_hdr
.b_iv
, ZIO_DATA_IV_LEN
);
1353 memcpy(mac
, hdr
->b_crypt_hdr
.b_mac
, ZIO_DATA_MAC_LEN
);
1354 *byteorder
= (hdr
->b_l1hdr
.b_byteswap
== DMU_BSWAP_NUMFUNCS
) ?
1355 ZFS_HOST_BYTEORDER
: !ZFS_HOST_BYTEORDER
;
1359 * Indicates how this buffer is compressed in memory. If it is not compressed
1360 * the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
1361 * arc_untransform() as long as it is also unencrypted.
1364 arc_get_compression(arc_buf_t
*buf
)
1366 return (ARC_BUF_COMPRESSED(buf
) ?
1367 HDR_GET_COMPRESS(buf
->b_hdr
) : ZIO_COMPRESS_OFF
);
1371 * Return the compression algorithm used to store this data in the ARC. If ARC
1372 * compression is enabled or this is an encrypted block, this will be the same
1373 * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
1375 static inline enum zio_compress
1376 arc_hdr_get_compress(arc_buf_hdr_t
*hdr
)
1378 return (HDR_COMPRESSION_ENABLED(hdr
) ?
1379 HDR_GET_COMPRESS(hdr
) : ZIO_COMPRESS_OFF
);
1383 arc_get_complevel(arc_buf_t
*buf
)
1385 return (buf
->b_hdr
->b_complevel
);
1388 static inline boolean_t
1389 arc_buf_is_shared(arc_buf_t
*buf
)
1391 boolean_t shared
= (buf
->b_data
!= NULL
&&
1392 buf
->b_hdr
->b_l1hdr
.b_pabd
!= NULL
&&
1393 abd_is_linear(buf
->b_hdr
->b_l1hdr
.b_pabd
) &&
1394 buf
->b_data
== abd_to_buf(buf
->b_hdr
->b_l1hdr
.b_pabd
));
1395 IMPLY(shared
, HDR_SHARED_DATA(buf
->b_hdr
));
1396 IMPLY(shared
, ARC_BUF_SHARED(buf
));
1397 IMPLY(shared
, ARC_BUF_COMPRESSED(buf
) || ARC_BUF_LAST(buf
));
1400 * It would be nice to assert arc_can_share() too, but the "hdr isn't
1401 * already being shared" requirement prevents us from doing that.
/*
 * Free the checksum associated with this header. If there is no checksum,
 * this is a no-op.
 */
1412 arc_cksum_free(arc_buf_hdr_t
*hdr
)
1415 ASSERT(HDR_HAS_L1HDR(hdr
));
1417 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1418 if (hdr
->b_l1hdr
.b_freeze_cksum
!= NULL
) {
1419 kmem_free(hdr
->b_l1hdr
.b_freeze_cksum
, sizeof (zio_cksum_t
));
1420 hdr
->b_l1hdr
.b_freeze_cksum
= NULL
;
1422 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1427 * Return true iff at least one of the bufs on hdr is not compressed.
1428 * Encrypted buffers count as compressed.
1431 arc_hdr_has_uncompressed_buf(arc_buf_hdr_t
*hdr
)
1433 ASSERT(hdr
->b_l1hdr
.b_state
== arc_anon
|| HDR_EMPTY_OR_LOCKED(hdr
));
1435 for (arc_buf_t
*b
= hdr
->b_l1hdr
.b_buf
; b
!= NULL
; b
= b
->b_next
) {
1436 if (!ARC_BUF_COMPRESSED(b
)) {
1445 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
1446 * matches the checksum that is stored in the hdr. If there is no checksum,
1447 * or if the buf is compressed, this is a no-op.
1450 arc_cksum_verify(arc_buf_t
*buf
)
1453 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1456 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1459 if (ARC_BUF_COMPRESSED(buf
))
1462 ASSERT(HDR_HAS_L1HDR(hdr
));
1464 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1466 if (hdr
->b_l1hdr
.b_freeze_cksum
== NULL
|| HDR_IO_ERROR(hdr
)) {
1467 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1471 fletcher_2_native(buf
->b_data
, arc_buf_size(buf
), NULL
, &zc
);
1472 if (!ZIO_CHECKSUM_EQUAL(*hdr
->b_l1hdr
.b_freeze_cksum
, zc
))
1473 panic("buffer modified while frozen!");
1474 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1479 * This function makes the assumption that data stored in the L2ARC
1480 * will be transformed exactly as it is in the main pool. Because of
1481 * this we can verify the checksum against the reading process's bp.
1484 arc_cksum_is_equal(arc_buf_hdr_t
*hdr
, zio_t
*zio
)
1486 ASSERT(!BP_IS_EMBEDDED(zio
->io_bp
));
1487 VERIFY3U(BP_GET_PSIZE(zio
->io_bp
), ==, HDR_GET_PSIZE(hdr
));
1490 * Block pointers always store the checksum for the logical data.
1491 * If the block pointer has the gang bit set, then the checksum
1492 * it represents is for the reconstituted data and not for an
1493 * individual gang member. The zio pipeline, however, must be able to
1494 * determine the checksum of each of the gang constituents so it
1495 * treats the checksum comparison differently than what we need
1496 * for l2arc blocks. This prevents us from using the
1497 * zio_checksum_error() interface directly. Instead we must call the
1498 * zio_checksum_error_impl() so that we can ensure the checksum is
1499 * generated using the correct checksum algorithm and accounts for the
1500 * logical I/O size and not just a gang fragment.
1502 return (zio_checksum_error_impl(zio
->io_spa
, zio
->io_bp
,
1503 BP_GET_CHECKSUM(zio
->io_bp
), zio
->io_abd
, zio
->io_size
,
1504 zio
->io_offset
, NULL
) == 0);
1508 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
1509 * checksum and attaches it to the buf's hdr so that we can ensure that the buf
1510 * isn't modified later on. If buf is compressed or there is already a checksum
1511 * on the hdr, this is a no-op (we only checksum uncompressed bufs).
1514 arc_cksum_compute(arc_buf_t
*buf
)
1516 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1520 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1521 ASSERT(HDR_HAS_L1HDR(hdr
));
1522 mutex_enter(&hdr
->b_l1hdr
.b_freeze_lock
);
1523 if (hdr
->b_l1hdr
.b_freeze_cksum
!= NULL
|| ARC_BUF_COMPRESSED(buf
)) {
1524 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1528 ASSERT(!ARC_BUF_ENCRYPTED(buf
));
1529 ASSERT(!ARC_BUF_COMPRESSED(buf
));
1530 hdr
->b_l1hdr
.b_freeze_cksum
= kmem_alloc(sizeof (zio_cksum_t
),
1532 fletcher_2_native(buf
->b_data
, arc_buf_size(buf
), NULL
,
1533 hdr
->b_l1hdr
.b_freeze_cksum
);
1534 mutex_exit(&hdr
->b_l1hdr
.b_freeze_lock
);
1541 arc_buf_sigsegv(int sig
, siginfo_t
*si
, void *unused
)
1543 (void) sig
, (void) unused
;
1544 panic("Got SIGSEGV at address: 0x%lx\n", (long)si
->si_addr
);
1549 arc_buf_unwatch(arc_buf_t
*buf
)
1553 ASSERT0(mprotect(buf
->b_data
, arc_buf_size(buf
),
1554 PROT_READ
| PROT_WRITE
));
1562 arc_buf_watch(arc_buf_t
*buf
)
1566 ASSERT0(mprotect(buf
->b_data
, arc_buf_size(buf
),
1573 static arc_buf_contents_t
1574 arc_buf_type(arc_buf_hdr_t
*hdr
)
1576 arc_buf_contents_t type
;
1577 if (HDR_ISTYPE_METADATA(hdr
)) {
1578 type
= ARC_BUFC_METADATA
;
1580 type
= ARC_BUFC_DATA
;
1582 VERIFY3U(hdr
->b_type
, ==, type
);
1587 arc_is_metadata(arc_buf_t
*buf
)
1589 return (HDR_ISTYPE_METADATA(buf
->b_hdr
) != 0);
1593 arc_bufc_to_flags(arc_buf_contents_t type
)
1597 /* metadata field is 0 if buffer contains normal data */
1599 case ARC_BUFC_METADATA
:
1600 return (ARC_FLAG_BUFC_METADATA
);
1604 panic("undefined ARC buffer type!");
1605 return ((uint32_t)-1);
1609 arc_buf_thaw(arc_buf_t
*buf
)
1611 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1613 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
1614 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
1616 arc_cksum_verify(buf
);
1619 * Compressed buffers do not manipulate the b_freeze_cksum.
1621 if (ARC_BUF_COMPRESSED(buf
))
1624 ASSERT(HDR_HAS_L1HDR(hdr
));
1625 arc_cksum_free(hdr
);
1626 arc_buf_unwatch(buf
);
1630 arc_buf_freeze(arc_buf_t
*buf
)
1632 if (!(zfs_flags
& ZFS_DEBUG_MODIFY
))
1635 if (ARC_BUF_COMPRESSED(buf
))
1638 ASSERT(HDR_HAS_L1HDR(buf
->b_hdr
));
1639 arc_cksum_compute(buf
);
/*
 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
 * the following functions should be used to ensure that the flags are
 * updated in a thread-safe way. When manipulating the flags either
 * the hash_lock must be held or the hdr must be undiscoverable. This
 * ensures that we're not racing with any other threads when updating
 * the flags.
 */
1651 arc_hdr_set_flags(arc_buf_hdr_t
*hdr
, arc_flags_t flags
)
1653 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
1654 hdr
->b_flags
|= flags
;
1658 arc_hdr_clear_flags(arc_buf_hdr_t
*hdr
, arc_flags_t flags
)
1660 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
1661 hdr
->b_flags
&= ~flags
;
1665 * Setting the compression bits in the arc_buf_hdr_t's b_flags is
1666 * done in a special way since we have to clear and set bits
1667 * at the same time. Consumers that wish to set the compression bits
1668 * must use this function to ensure that the flags are updated in
1669 * thread-safe manner.
1672 arc_hdr_set_compress(arc_buf_hdr_t
*hdr
, enum zio_compress cmp
)
1674 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
	/*
	 * Holes and embedded blocks will always have a psize = 0 so
	 * we ignore the compression of the blkptr and mark them
	 * as uncompressed.
	 */
1681 if (!zfs_compressed_arc_enabled
|| HDR_GET_PSIZE(hdr
) == 0) {
1682 arc_hdr_clear_flags(hdr
, ARC_FLAG_COMPRESSED_ARC
);
1683 ASSERT(!HDR_COMPRESSION_ENABLED(hdr
));
1685 arc_hdr_set_flags(hdr
, ARC_FLAG_COMPRESSED_ARC
);
1686 ASSERT(HDR_COMPRESSION_ENABLED(hdr
));
1689 HDR_SET_COMPRESS(hdr
, cmp
);
1690 ASSERT3U(HDR_GET_COMPRESS(hdr
), ==, cmp
);
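/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * a hdr for a 128K block that compressed down to 32K would typically be
 * set up along the lines of:
 *
 *	HDR_SET_LSIZE(hdr, 128 * 1024);
 *	HDR_SET_PSIZE(hdr, 32 * 1024);
 *	arc_hdr_set_compress(hdr, ZIO_COMPRESS_LZ4);
 *
 * If zfs_compressed_arc_enabled is off (or psize is 0), the call above
 * still records the compression algorithm but leaves
 * ARC_FLAG_COMPRESSED_ARC clear, so the in-memory copy stays uncompressed.
 */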
/*
 * Looks for another buf on the same hdr which has the data decompressed,
 * copies from it, and returns true. If no such buf exists, returns false.
 */
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	boolean_t copied = B_FALSE;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(!ARC_BUF_COMPRESSED(buf));

	for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
	    from = from->b_next) {
		/* can't use our own data buffer */
		if (from == buf) {
			continue;
		}

		if (!ARC_BUF_COMPRESSED(from)) {
			memcpy(buf->b_data, from->b_data, arc_buf_size(buf));
			copied = B_TRUE;
			break;
		}
	}

	/*
	 * There were no decompressed bufs, so there should not be a
	 * checksum on the hdr either.
	 */
	if (zfs_flags & ZFS_DEBUG_MODIFY)
		EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);

	return (copied);
}
/*
 * Allocates an ARC buf header that's in an evicted & L2-cached state.
 * This is used during l2arc reconstruction to make empty ARC buffers
 * which circumvent the regular disk->arc->l2arc path and instead come
 * into being in the reverse order, i.e. l2arc->arc.
 */
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
    dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
    enum zio_compress compress, uint8_t complevel, boolean_t protected,
    boolean_t prefetch, arc_state_type_t arcs_state)
{
	arc_buf_hdr_t	*hdr;

	ASSERT(size != 0);
	hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
	hdr->b_birth = birth;
	hdr->b_type = type;
	hdr->b_flags = 0;
	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
	HDR_SET_LSIZE(hdr, size);
	HDR_SET_PSIZE(hdr, psize);
	arc_hdr_set_compress(hdr, compress);
	hdr->b_complevel = complevel;
	if (protected)
		arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
	if (prefetch)
		arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
	hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);

	hdr->b_dva = dva;

	hdr->b_l2hdr.b_dev = dev;
	hdr->b_l2hdr.b_daddr = daddr;
	hdr->b_l2hdr.b_arcs_state = arcs_state;

	return (hdr);
}
/*
 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
 */
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
	uint64_t size;

	if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
	    HDR_GET_PSIZE(hdr) > 0) {
		size = HDR_GET_PSIZE(hdr);
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
		size = HDR_GET_LSIZE(hdr);
	}
	return (size);
}
1791 arc_hdr_authenticate(arc_buf_hdr_t
*hdr
, spa_t
*spa
, uint64_t dsobj
)
1795 uint64_t lsize
= HDR_GET_LSIZE(hdr
);
1796 uint64_t psize
= HDR_GET_PSIZE(hdr
);
1797 void *tmpbuf
= NULL
;
1798 abd_t
*abd
= hdr
->b_l1hdr
.b_pabd
;
1800 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
1801 ASSERT(HDR_AUTHENTICATED(hdr
));
1802 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
1805 * The MAC is calculated on the compressed data that is stored on disk.
1806 * However, if compressed arc is disabled we will only have the
1807 * decompressed data available to us now. Compress it into a temporary
1808 * abd so we can verify the MAC. The performance overhead of this will
1809 * be relatively low, since most objects in an encrypted objset will
1810 * be encrypted (instead of authenticated) anyway.
1812 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
&&
1813 !HDR_COMPRESSION_ENABLED(hdr
)) {
1815 csize
= zio_compress_data(HDR_GET_COMPRESS(hdr
),
1816 hdr
->b_l1hdr
.b_pabd
, &tmpbuf
, lsize
, hdr
->b_complevel
);
1817 ASSERT3P(tmpbuf
, !=, NULL
);
1818 ASSERT3U(csize
, <=, psize
);
1819 abd
= abd_get_from_buf(tmpbuf
, lsize
);
1820 abd_take_ownership_of_buf(abd
, B_TRUE
);
1821 abd_zero_off(abd
, csize
, psize
- csize
);
1825 * Authentication is best effort. We authenticate whenever the key is
1826 * available. If we succeed we clear ARC_FLAG_NOAUTH.
1828 if (hdr
->b_crypt_hdr
.b_ot
== DMU_OT_OBJSET
) {
1829 ASSERT3U(HDR_GET_COMPRESS(hdr
), ==, ZIO_COMPRESS_OFF
);
1830 ASSERT3U(lsize
, ==, psize
);
1831 ret
= spa_do_crypt_objset_mac_abd(B_FALSE
, spa
, dsobj
, abd
,
1832 psize
, hdr
->b_l1hdr
.b_byteswap
!= DMU_BSWAP_NUMFUNCS
);
1834 ret
= spa_do_crypt_mac_abd(B_FALSE
, spa
, dsobj
, abd
, psize
,
1835 hdr
->b_crypt_hdr
.b_mac
);
1839 arc_hdr_clear_flags(hdr
, ARC_FLAG_NOAUTH
);
1840 else if (ret
!= ENOENT
)
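/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * when compressed ARC is disabled, arc_hdr_authenticate() above must
 * recreate the on-disk (compressed) image before it can check the MAC,
 * because the MAC covers the physical bytes. The temporary abd built
 * around tmpbuf exists only for that verification and is never attached
 * to the header.
 */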
1856 * This function will take a header that only has raw encrypted data in
1857 * b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
1858 * b_l1hdr.b_pabd. If designated in the header flags, this function will
1859 * also decompress the data.
1862 arc_hdr_decrypt(arc_buf_hdr_t
*hdr
, spa_t
*spa
, const zbookmark_phys_t
*zb
)
1867 boolean_t no_crypt
= B_FALSE
;
1868 boolean_t bswap
= (hdr
->b_l1hdr
.b_byteswap
!= DMU_BSWAP_NUMFUNCS
);
1870 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
1871 ASSERT(HDR_ENCRYPTED(hdr
));
1873 arc_hdr_alloc_abd(hdr
, 0);
1875 ret
= spa_do_crypt_abd(B_FALSE
, spa
, zb
, hdr
->b_crypt_hdr
.b_ot
,
1876 B_FALSE
, bswap
, hdr
->b_crypt_hdr
.b_salt
, hdr
->b_crypt_hdr
.b_iv
,
1877 hdr
->b_crypt_hdr
.b_mac
, HDR_GET_PSIZE(hdr
), hdr
->b_l1hdr
.b_pabd
,
1878 hdr
->b_crypt_hdr
.b_rabd
, &no_crypt
);
1883 abd_copy(hdr
->b_l1hdr
.b_pabd
, hdr
->b_crypt_hdr
.b_rabd
,
1884 HDR_GET_PSIZE(hdr
));
 * If this header has disabled arc compression but the b_pabd is
 * compressed after decrypting it, we need to decompress the newly
 * decrypted data.
1892 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
&&
1893 !HDR_COMPRESSION_ENABLED(hdr
)) {
1895 * We want to make sure that we are correctly honoring the
1896 * zfs_abd_scatter_enabled setting, so we allocate an abd here
1897 * and then loan a buffer from it, rather than allocating a
1898 * linear buffer and wrapping it in an abd later.
1900 cabd
= arc_get_data_abd(hdr
, arc_hdr_size(hdr
), hdr
, 0);
1901 tmp
= abd_borrow_buf(cabd
, arc_hdr_size(hdr
));
1903 ret
= zio_decompress_data(HDR_GET_COMPRESS(hdr
),
1904 hdr
->b_l1hdr
.b_pabd
, tmp
, HDR_GET_PSIZE(hdr
),
1905 HDR_GET_LSIZE(hdr
), &hdr
->b_complevel
);
1907 abd_return_buf(cabd
, tmp
, arc_hdr_size(hdr
));
1911 abd_return_buf_copy(cabd
, tmp
, arc_hdr_size(hdr
));
1912 arc_free_data_abd(hdr
, hdr
->b_l1hdr
.b_pabd
,
1913 arc_hdr_size(hdr
), hdr
);
1914 hdr
->b_l1hdr
.b_pabd
= cabd
;
1920 arc_hdr_free_abd(hdr
, B_FALSE
);
1922 arc_free_data_buf(hdr
, cabd
, arc_hdr_size(hdr
), hdr
);
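/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * arc_hdr_decrypt() above produces b_pabd from b_rabd in up to two steps:
 * raw -> decrypted (possibly still compressed) -> decompressed, with the
 * last step happening only when the header carries on-disk compression
 * that the ARC itself is not keeping (HDR_COMPRESSION_ENABLED() is false).
 */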
1928 * This function is called during arc_buf_fill() to prepare the header's
 * abd plaintext pointer for use. This involves authenticating protected
 * data and decrypting encrypted data into the plaintext abd.
1933 arc_fill_hdr_crypt(arc_buf_hdr_t
*hdr
, kmutex_t
*hash_lock
, spa_t
*spa
,
1934 const zbookmark_phys_t
*zb
, boolean_t noauth
)
1938 ASSERT(HDR_PROTECTED(hdr
));
1940 if (hash_lock
!= NULL
)
1941 mutex_enter(hash_lock
);
1943 if (HDR_NOAUTH(hdr
) && !noauth
) {
1945 * The caller requested authenticated data but our data has
1946 * not been authenticated yet. Verify the MAC now if we can.
1948 ret
= arc_hdr_authenticate(hdr
, spa
, zb
->zb_objset
);
1951 } else if (HDR_HAS_RABD(hdr
) && hdr
->b_l1hdr
.b_pabd
== NULL
) {
1953 * If we only have the encrypted version of the data, but the
1954 * unencrypted version was requested we take this opportunity
1955 * to store the decrypted version in the header for future use.
1957 ret
= arc_hdr_decrypt(hdr
, spa
, zb
);
1962 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
1964 if (hash_lock
!= NULL
)
1965 mutex_exit(hash_lock
);
1970 if (hash_lock
!= NULL
)
1971 mutex_exit(hash_lock
);
1977 * This function is used by the dbuf code to decrypt bonus buffers in place.
1978 * The dbuf code itself doesn't have any locking for decrypting a shared dnode
1979 * block, so we use the hash lock here to protect against concurrent calls to
1983 arc_buf_untransform_in_place(arc_buf_t
*buf
)
1985 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
1987 ASSERT(HDR_ENCRYPTED(hdr
));
1988 ASSERT3U(hdr
->b_crypt_hdr
.b_ot
, ==, DMU_OT_DNODE
);
1989 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
1990 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
1992 zio_crypt_copy_dnode_bonus(hdr
->b_l1hdr
.b_pabd
, buf
->b_data
,
1994 buf
->b_flags
&= ~ARC_BUF_FLAG_ENCRYPTED
;
1995 buf
->b_flags
&= ~ARC_BUF_FLAG_COMPRESSED
;
1999 * Given a buf that has a data buffer attached to it, this function will
2000 * efficiently fill the buf with data of the specified compression setting from
2001 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
2002 * are already sharing a data buf, no copy is performed.
2004 * If the buf is marked as compressed but uncompressed data was requested, this
2005 * will allocate a new data buffer for the buf, remove that flag, and fill the
2006 * buf with uncompressed data. You can't request a compressed buf on a hdr with
2007 * uncompressed data, and (since we haven't added support for it yet) if you
2008 * want compressed data your buf must already be marked as compressed and have
2009 * the correct-sized data buffer.
2012 arc_buf_fill(arc_buf_t
*buf
, spa_t
*spa
, const zbookmark_phys_t
*zb
,
2013 arc_fill_flags_t flags
)
2016 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
2017 boolean_t hdr_compressed
=
2018 (arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
);
2019 boolean_t compressed
= (flags
& ARC_FILL_COMPRESSED
) != 0;
2020 boolean_t encrypted
= (flags
& ARC_FILL_ENCRYPTED
) != 0;
2021 dmu_object_byteswap_t bswap
= hdr
->b_l1hdr
.b_byteswap
;
2022 kmutex_t
*hash_lock
= (flags
& ARC_FILL_LOCKED
) ? NULL
: HDR_LOCK(hdr
);
2024 ASSERT3P(buf
->b_data
, !=, NULL
);
2025 IMPLY(compressed
, hdr_compressed
|| ARC_BUF_ENCRYPTED(buf
));
2026 IMPLY(compressed
, ARC_BUF_COMPRESSED(buf
));
2027 IMPLY(encrypted
, HDR_ENCRYPTED(hdr
));
2028 IMPLY(encrypted
, ARC_BUF_ENCRYPTED(buf
));
2029 IMPLY(encrypted
, ARC_BUF_COMPRESSED(buf
));
2030 IMPLY(encrypted
, !ARC_BUF_SHARED(buf
));
2033 * If the caller wanted encrypted data we just need to copy it from
2034 * b_rabd and potentially byteswap it. We won't be able to do any
2035 * further transforms on it.
2038 ASSERT(HDR_HAS_RABD(hdr
));
2039 abd_copy_to_buf(buf
->b_data
, hdr
->b_crypt_hdr
.b_rabd
,
2040 HDR_GET_PSIZE(hdr
));
2045 * Adjust encrypted and authenticated headers to accommodate
2046 * the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
2047 * allowed to fail decryption due to keys not being loaded
2048 * without being marked as an IO error.
2050 if (HDR_PROTECTED(hdr
)) {
2051 error
= arc_fill_hdr_crypt(hdr
, hash_lock
, spa
,
2052 zb
, !!(flags
& ARC_FILL_NOAUTH
));
2053 if (error
== EACCES
&& (flags
& ARC_FILL_IN_PLACE
) != 0) {
2055 } else if (error
!= 0) {
2056 if (hash_lock
!= NULL
)
2057 mutex_enter(hash_lock
);
2058 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_ERROR
);
2059 if (hash_lock
!= NULL
)
2060 mutex_exit(hash_lock
);
2066 * There is a special case here for dnode blocks which are
2067 * decrypting their bonus buffers. These blocks may request to
2068 * be decrypted in-place. This is necessary because there may
2069 * be many dnodes pointing into this buffer and there is
2070 * currently no method to synchronize replacing the backing
2071 * b_data buffer and updating all of the pointers. Here we use
2072 * the hash lock to ensure there are no races. If the need
2073 * arises for other types to be decrypted in-place, they must
2074 * add handling here as well.
2076 if ((flags
& ARC_FILL_IN_PLACE
) != 0) {
2077 ASSERT(!hdr_compressed
);
2078 ASSERT(!compressed
);
2081 if (HDR_ENCRYPTED(hdr
) && ARC_BUF_ENCRYPTED(buf
)) {
2082 ASSERT3U(hdr
->b_crypt_hdr
.b_ot
, ==, DMU_OT_DNODE
);
2084 if (hash_lock
!= NULL
)
2085 mutex_enter(hash_lock
);
2086 arc_buf_untransform_in_place(buf
);
2087 if (hash_lock
!= NULL
)
2088 mutex_exit(hash_lock
);
2090 /* Compute the hdr's checksum if necessary */
2091 arc_cksum_compute(buf
);
2097 if (hdr_compressed
== compressed
) {
2098 if (!arc_buf_is_shared(buf
)) {
2099 abd_copy_to_buf(buf
->b_data
, hdr
->b_l1hdr
.b_pabd
,
2103 ASSERT(hdr_compressed
);
2104 ASSERT(!compressed
);
2107 * If the buf is sharing its data with the hdr, unlink it and
2108 * allocate a new data buffer for the buf.
2110 if (arc_buf_is_shared(buf
)) {
2111 ASSERT(ARC_BUF_COMPRESSED(buf
));
2113 /* We need to give the buf its own b_data */
2114 buf
->b_flags
&= ~ARC_BUF_FLAG_SHARED
;
2116 arc_get_data_buf(hdr
, HDR_GET_LSIZE(hdr
), buf
);
2117 arc_hdr_clear_flags(hdr
, ARC_FLAG_SHARED_DATA
);
2119 /* Previously overhead was 0; just add new overhead */
2120 ARCSTAT_INCR(arcstat_overhead_size
, HDR_GET_LSIZE(hdr
));
2121 } else if (ARC_BUF_COMPRESSED(buf
)) {
2122 /* We need to reallocate the buf's b_data */
2123 arc_free_data_buf(hdr
, buf
->b_data
, HDR_GET_PSIZE(hdr
),
2126 arc_get_data_buf(hdr
, HDR_GET_LSIZE(hdr
), buf
);
2128 /* We increased the size of b_data; update overhead */
2129 ARCSTAT_INCR(arcstat_overhead_size
,
2130 HDR_GET_LSIZE(hdr
) - HDR_GET_PSIZE(hdr
));
2134 * Regardless of the buf's previous compression settings, it
2135 * should not be compressed at the end of this function.
2137 buf
->b_flags
&= ~ARC_BUF_FLAG_COMPRESSED
;
2140 * Try copying the data from another buf which already has a
2141 * decompressed version. If that's not possible, it's time to
2142 * bite the bullet and decompress the data from the hdr.
2144 if (arc_buf_try_copy_decompressed_data(buf
)) {
2145 /* Skip byteswapping and checksumming (already done) */
2148 error
= zio_decompress_data(HDR_GET_COMPRESS(hdr
),
2149 hdr
->b_l1hdr
.b_pabd
, buf
->b_data
,
2150 HDR_GET_PSIZE(hdr
), HDR_GET_LSIZE(hdr
),
2154 * Absent hardware errors or software bugs, this should
2155 * be impossible, but log it anyway so we can debug it.
2159 "hdr %px, compress %d, psize %d, lsize %d",
2160 hdr
, arc_hdr_get_compress(hdr
),
2161 HDR_GET_PSIZE(hdr
), HDR_GET_LSIZE(hdr
));
2162 if (hash_lock
!= NULL
)
2163 mutex_enter(hash_lock
);
2164 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_ERROR
);
2165 if (hash_lock
!= NULL
)
2166 mutex_exit(hash_lock
);
2167 return (SET_ERROR(EIO
));
2173 /* Byteswap the buf's data if necessary */
2174 if (bswap
!= DMU_BSWAP_NUMFUNCS
) {
2175 ASSERT(!HDR_SHARED_DATA(hdr
));
2176 ASSERT3U(bswap
, <, DMU_BSWAP_NUMFUNCS
);
2177 dmu_ot_byteswap
[bswap
].ob_func(buf
->b_data
, HDR_GET_LSIZE(hdr
));
2180 /* Compute the hdr's checksum if necessary */
2181 arc_cksum_compute(buf
);
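/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * the fill flags used above combine roughly as follows:
 *
 *	ARC_FILL_ENCRYPTED	copy the raw ciphertext from b_rabd
 *	ARC_FILL_COMPRESSED	leave the buf's data in its on-disk form
 *	ARC_FILL_IN_PLACE	decrypt dnode/bonus data without replacing
 *				the b_data pointer
 *	ARC_FILL_LOCKED		the hash lock is already held (or the hdr
 *				is undiscoverable), so do not take it here
 *
 * Decompression falls back to zio_decompress_data() only when no sibling
 * buf on the same hdr already holds a decompressed copy.
 */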
2187 * If this function is being called to decrypt an encrypted buffer or verify an
2188 * authenticated one, the key must be loaded and a mapping must be made
 * available in the keystore via spa_keystore_create_mapping() or one of its
 * callers.
2193 arc_untransform(arc_buf_t
*buf
, spa_t
*spa
, const zbookmark_phys_t
*zb
,
2197 arc_fill_flags_t flags
= 0;
2200 flags
|= ARC_FILL_IN_PLACE
;
2202 ret
= arc_buf_fill(buf
, spa
, zb
, flags
);
2203 if (ret
== ECKSUM
) {
2205 * Convert authentication and decryption errors to EIO
2206 * (and generate an ereport) before leaving the ARC.
2208 ret
= SET_ERROR(EIO
);
2209 spa_log_error(spa
, zb
, &buf
->b_hdr
->b_birth
);
2210 (void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION
,
2211 spa
, NULL
, zb
, NULL
, 0);
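/*
 * Illustrative note (editor's sketch, not part of the upstream source;
 * exact signatures vary between releases): a caller that needs plaintext
 * from a raw buf is expected to do roughly:
 *
 *	spa_keystore_create_mapping(spa, ds, FTAG, NULL);
 *	err = arc_untransform(buf, spa, zb, B_FALSE);
 *	spa_keystore_remove_mapping(spa, dsobj, FTAG);
 *
 * The point is that a key mapping must exist for the decryption above to
 * succeed; otherwise the fill fails and the error is converted to EIO.
 */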
2218 * Increment the amount of evictable space in the arc_state_t's refcount.
2219 * We account for the space used by the hdr and the arc buf individually
2220 * so that we can add and remove them from the refcount individually.
2223 arc_evictable_space_increment(arc_buf_hdr_t
*hdr
, arc_state_t
*state
)
2225 arc_buf_contents_t type
= arc_buf_type(hdr
);
2227 ASSERT(HDR_HAS_L1HDR(hdr
));
2229 if (GHOST_STATE(state
)) {
2230 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2231 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2232 ASSERT(!HDR_HAS_RABD(hdr
));
2233 (void) zfs_refcount_add_many(&state
->arcs_esize
[type
],
2234 HDR_GET_LSIZE(hdr
), hdr
);
2238 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2239 (void) zfs_refcount_add_many(&state
->arcs_esize
[type
],
2240 arc_hdr_size(hdr
), hdr
);
2242 if (HDR_HAS_RABD(hdr
)) {
2243 (void) zfs_refcount_add_many(&state
->arcs_esize
[type
],
2244 HDR_GET_PSIZE(hdr
), hdr
);
2247 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2248 buf
= buf
->b_next
) {
2249 if (arc_buf_is_shared(buf
))
2251 (void) zfs_refcount_add_many(&state
->arcs_esize
[type
],
2252 arc_buf_size(buf
), buf
);
2257 * Decrement the amount of evictable space in the arc_state_t's refcount.
2258 * We account for the space used by the hdr and the arc buf individually
2259 * so that we can add and remove them from the refcount individually.
2262 arc_evictable_space_decrement(arc_buf_hdr_t
*hdr
, arc_state_t
*state
)
2264 arc_buf_contents_t type
= arc_buf_type(hdr
);
2266 ASSERT(HDR_HAS_L1HDR(hdr
));
2268 if (GHOST_STATE(state
)) {
2269 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2270 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2271 ASSERT(!HDR_HAS_RABD(hdr
));
2272 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
2273 HDR_GET_LSIZE(hdr
), hdr
);
2277 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2278 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
2279 arc_hdr_size(hdr
), hdr
);
2281 if (HDR_HAS_RABD(hdr
)) {
2282 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
2283 HDR_GET_PSIZE(hdr
), hdr
);
2286 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2287 buf
= buf
->b_next
) {
2288 if (arc_buf_is_shared(buf
))
2290 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
2291 arc_buf_size(buf
), buf
);
2296 * Add a reference to this hdr indicating that someone is actively
2297 * referencing that memory. When the refcount transitions from 0 to 1,
2298 * we remove it from the respective arc_state_t list to indicate that
2299 * it is not evictable.
2302 add_reference(arc_buf_hdr_t
*hdr
, const void *tag
)
2304 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
2306 ASSERT(HDR_HAS_L1HDR(hdr
));
2307 if (!HDR_EMPTY(hdr
) && !MUTEX_HELD(HDR_LOCK(hdr
))) {
2308 ASSERT(state
== arc_anon
);
2309 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
2310 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
2313 if ((zfs_refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
) == 1) &&
2314 state
!= arc_anon
&& state
!= arc_l2c_only
) {
2315 /* We don't use the L2-only state list. */
2316 multilist_remove(&state
->arcs_list
[arc_buf_type(hdr
)], hdr
);
2317 arc_evictable_space_decrement(hdr
, state
);
2322 * Remove a reference from this hdr. When the reference transitions from
2323 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
2324 * list making it eligible for eviction.
2327 remove_reference(arc_buf_hdr_t
*hdr
, const void *tag
)
2330 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
2332 ASSERT(HDR_HAS_L1HDR(hdr
));
2333 ASSERT(state
== arc_anon
|| MUTEX_HELD(HDR_LOCK(hdr
)));
2334 ASSERT(!GHOST_STATE(state
)); /* arc_l2c_only counts as a ghost. */
2336 if ((cnt
= zfs_refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, tag
)) != 0)
2339 if (state
== arc_anon
) {
2340 arc_hdr_destroy(hdr
);
2343 if (state
== arc_uncached
&& !HDR_PREFETCH(hdr
)) {
2344 arc_change_state(arc_anon
, hdr
);
2345 arc_hdr_destroy(hdr
);
2348 multilist_insert(&state
->arcs_list
[arc_buf_type(hdr
)], hdr
);
2349 arc_evictable_space_increment(hdr
, state
);
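/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * add_reference()/remove_reference() are what toggle a header between
 * "in use" and "evictable": the first hold pulls the hdr off its state's
 * multilist (so arc_evict_state() cannot see it), and the last release
 * puts it back, or destroys the hdr outright for anonymous headers and
 * for uncached, non-prefetch headers.
 */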
2354 * Returns detailed information about a specific arc buffer. When the
2355 * state_index argument is set the function will calculate the arc header
2356 * list position for its arc state. Since this requires a linear traversal
 * callers are strongly encouraged not to do this. However, it can be helpful
2358 * for targeted analysis so the functionality is provided.
2361 arc_buf_info(arc_buf_t
*ab
, arc_buf_info_t
*abi
, int state_index
)
2364 arc_buf_hdr_t
*hdr
= ab
->b_hdr
;
2365 l1arc_buf_hdr_t
*l1hdr
= NULL
;
2366 l2arc_buf_hdr_t
*l2hdr
= NULL
;
2367 arc_state_t
*state
= NULL
;
2369 memset(abi
, 0, sizeof (arc_buf_info_t
));
2374 abi
->abi_flags
= hdr
->b_flags
;
2376 if (HDR_HAS_L1HDR(hdr
)) {
2377 l1hdr
= &hdr
->b_l1hdr
;
2378 state
= l1hdr
->b_state
;
2380 if (HDR_HAS_L2HDR(hdr
))
2381 l2hdr
= &hdr
->b_l2hdr
;
2384 abi
->abi_bufcnt
= 0;
2385 for (arc_buf_t
*buf
= l1hdr
->b_buf
; buf
; buf
= buf
->b_next
)
2387 abi
->abi_access
= l1hdr
->b_arc_access
;
2388 abi
->abi_mru_hits
= l1hdr
->b_mru_hits
;
2389 abi
->abi_mru_ghost_hits
= l1hdr
->b_mru_ghost_hits
;
2390 abi
->abi_mfu_hits
= l1hdr
->b_mfu_hits
;
2391 abi
->abi_mfu_ghost_hits
= l1hdr
->b_mfu_ghost_hits
;
2392 abi
->abi_holds
= zfs_refcount_count(&l1hdr
->b_refcnt
);
2396 abi
->abi_l2arc_dattr
= l2hdr
->b_daddr
;
2397 abi
->abi_l2arc_hits
= l2hdr
->b_hits
;
2400 abi
->abi_state_type
= state
? state
->arcs_state
: ARC_STATE_ANON
;
2401 abi
->abi_state_contents
= arc_buf_type(hdr
);
2402 abi
->abi_size
= arc_hdr_size(hdr
);
2406 * Move the supplied buffer to the indicated state. The hash lock
2407 * for the buffer must be held by the caller.
2410 arc_change_state(arc_state_t
*new_state
, arc_buf_hdr_t
*hdr
)
2412 arc_state_t
*old_state
;
2414 boolean_t update_old
, update_new
;
2415 arc_buf_contents_t type
= arc_buf_type(hdr
);
2418 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
2419 * in arc_read() when bringing a buffer out of the L2ARC. However, the
2420 * L1 hdr doesn't always exist when we change state to arc_anon before
 * destroying a header, in which case reallocating to add the L1 hdr is
 * pointless.
2424 if (HDR_HAS_L1HDR(hdr
)) {
2425 old_state
= hdr
->b_l1hdr
.b_state
;
2426 refcnt
= zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
);
2427 update_old
= (hdr
->b_l1hdr
.b_buf
!= NULL
||
2428 hdr
->b_l1hdr
.b_pabd
!= NULL
|| HDR_HAS_RABD(hdr
));
2430 IMPLY(GHOST_STATE(old_state
), hdr
->b_l1hdr
.b_buf
== NULL
);
2431 IMPLY(GHOST_STATE(new_state
), hdr
->b_l1hdr
.b_buf
== NULL
);
2432 IMPLY(old_state
== arc_anon
, hdr
->b_l1hdr
.b_buf
== NULL
||
2433 ARC_BUF_LAST(hdr
->b_l1hdr
.b_buf
));
2435 old_state
= arc_l2c_only
;
2437 update_old
= B_FALSE
;
2439 update_new
= update_old
;
2440 if (GHOST_STATE(old_state
))
2441 update_old
= B_TRUE
;
2442 if (GHOST_STATE(new_state
))
2443 update_new
= B_TRUE
;
2445 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
2446 ASSERT3P(new_state
, !=, old_state
);
2449 * If this buffer is evictable, transfer it from the
2450 * old state list to the new state list.
2453 if (old_state
!= arc_anon
&& old_state
!= arc_l2c_only
) {
2454 ASSERT(HDR_HAS_L1HDR(hdr
));
2455 /* remove_reference() saves on insert. */
2456 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
2457 multilist_remove(&old_state
->arcs_list
[type
],
2459 arc_evictable_space_decrement(hdr
, old_state
);
2462 if (new_state
!= arc_anon
&& new_state
!= arc_l2c_only
) {
2464 * An L1 header always exists here, since if we're
2465 * moving to some L1-cached state (i.e. not l2c_only or
2466 * anonymous), we realloc the header to add an L1hdr
2469 ASSERT(HDR_HAS_L1HDR(hdr
));
2470 multilist_insert(&new_state
->arcs_list
[type
], hdr
);
2471 arc_evictable_space_increment(hdr
, new_state
);
2475 ASSERT(!HDR_EMPTY(hdr
));
2476 if (new_state
== arc_anon
&& HDR_IN_HASH_TABLE(hdr
))
2477 buf_hash_remove(hdr
);
2479 /* adjust state sizes (ignore arc_l2c_only) */
2481 if (update_new
&& new_state
!= arc_l2c_only
) {
2482 ASSERT(HDR_HAS_L1HDR(hdr
));
2483 if (GHOST_STATE(new_state
)) {
2486 * When moving a header to a ghost state, we first
2487 * remove all arc buffers. Thus, we'll have no arc
2488 * buffer to use for the reference. As a result, we
2489 * use the arc header pointer for the reference.
2491 (void) zfs_refcount_add_many(
2492 &new_state
->arcs_size
[type
],
2493 HDR_GET_LSIZE(hdr
), hdr
);
2494 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2495 ASSERT(!HDR_HAS_RABD(hdr
));
2499 * Each individual buffer holds a unique reference,
2500 * thus we must remove each of these references one
2503 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2504 buf
= buf
->b_next
) {
2507 * When the arc_buf_t is sharing the data
2508 * block with the hdr, the owner of the
2509 * reference belongs to the hdr. Only
2510 * add to the refcount if the arc_buf_t is
2513 if (arc_buf_is_shared(buf
))
2516 (void) zfs_refcount_add_many(
2517 &new_state
->arcs_size
[type
],
2518 arc_buf_size(buf
), buf
);
2521 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2522 (void) zfs_refcount_add_many(
2523 &new_state
->arcs_size
[type
],
2524 arc_hdr_size(hdr
), hdr
);
2527 if (HDR_HAS_RABD(hdr
)) {
2528 (void) zfs_refcount_add_many(
2529 &new_state
->arcs_size
[type
],
2530 HDR_GET_PSIZE(hdr
), hdr
);
2535 if (update_old
&& old_state
!= arc_l2c_only
) {
2536 ASSERT(HDR_HAS_L1HDR(hdr
));
2537 if (GHOST_STATE(old_state
)) {
2538 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2539 ASSERT(!HDR_HAS_RABD(hdr
));
2542 * When moving a header off of a ghost state,
2543 * the header will not contain any arc buffers.
2544 * We use the arc header pointer for the reference
2545 * which is exactly what we did when we put the
2546 * header on the ghost state.
2549 (void) zfs_refcount_remove_many(
2550 &old_state
->arcs_size
[type
],
2551 HDR_GET_LSIZE(hdr
), hdr
);
2555 * Each individual buffer holds a unique reference,
2556 * thus we must remove each of these references one
2559 for (arc_buf_t
*buf
= hdr
->b_l1hdr
.b_buf
; buf
!= NULL
;
2560 buf
= buf
->b_next
) {
2563 * When the arc_buf_t is sharing the data
2564 * block with the hdr, the owner of the
2565 * reference belongs to the hdr. Only
2566 * add to the refcount if the arc_buf_t is
2569 if (arc_buf_is_shared(buf
))
2572 (void) zfs_refcount_remove_many(
2573 &old_state
->arcs_size
[type
],
2574 arc_buf_size(buf
), buf
);
2576 ASSERT(hdr
->b_l1hdr
.b_pabd
!= NULL
||
2579 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
2580 (void) zfs_refcount_remove_many(
2581 &old_state
->arcs_size
[type
],
2582 arc_hdr_size(hdr
), hdr
);
2585 if (HDR_HAS_RABD(hdr
)) {
2586 (void) zfs_refcount_remove_many(
2587 &old_state
->arcs_size
[type
],
2588 HDR_GET_PSIZE(hdr
), hdr
);
2593 if (HDR_HAS_L1HDR(hdr
)) {
2594 hdr
->b_l1hdr
.b_state
= new_state
;
2596 if (HDR_HAS_L2HDR(hdr
) && new_state
!= arc_l2c_only
) {
2597 l2arc_hdr_arcstats_decrement_state(hdr
);
2598 hdr
->b_l2hdr
.b_arcs_state
= new_state
->arcs_state
;
2599 l2arc_hdr_arcstats_increment_state(hdr
);
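/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * the bookkeeping above keeps two invariants: arcs_size[] counts ghost
 * states by lsize (headers only, with the hdr pointer as the reference
 * holder) and resident states by the bytes actually held (b_pabd, b_rabd
 * and each unshared buf, each with its own reference), while arcs_esize[]
 * only ever reflects the evictable subset maintained by the
 * arc_evictable_space_*() helpers.
 */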
2605 arc_space_consume(uint64_t space
, arc_space_type_t type
)
2607 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
2612 case ARC_SPACE_DATA
:
2613 ARCSTAT_INCR(arcstat_data_size
, space
);
2615 case ARC_SPACE_META
:
2616 ARCSTAT_INCR(arcstat_metadata_size
, space
);
2618 case ARC_SPACE_BONUS
:
2619 ARCSTAT_INCR(arcstat_bonus_size
, space
);
2621 case ARC_SPACE_DNODE
:
2622 ARCSTAT_INCR(arcstat_dnode_size
, space
);
2624 case ARC_SPACE_DBUF
:
2625 ARCSTAT_INCR(arcstat_dbuf_size
, space
);
2627 case ARC_SPACE_HDRS
:
2628 ARCSTAT_INCR(arcstat_hdr_size
, space
);
2630 case ARC_SPACE_L2HDRS
:
2631 aggsum_add(&arc_sums
.arcstat_l2_hdr_size
, space
);
2633 case ARC_SPACE_ABD_CHUNK_WASTE
:
2635 * Note: this includes space wasted by all scatter ABD's, not
2636 * just those allocated by the ARC. But the vast majority of
2637 * scatter ABD's come from the ARC, because other users are
2640 ARCSTAT_INCR(arcstat_abd_chunk_waste_size
, space
);
2644 if (type
!= ARC_SPACE_DATA
&& type
!= ARC_SPACE_ABD_CHUNK_WASTE
)
2645 ARCSTAT_INCR(arcstat_meta_used
, space
);
2647 aggsum_add(&arc_sums
.arcstat_size
, space
);
2651 arc_space_return(uint64_t space
, arc_space_type_t type
)
2653 ASSERT(type
>= 0 && type
< ARC_SPACE_NUMTYPES
);
2658 case ARC_SPACE_DATA
:
2659 ARCSTAT_INCR(arcstat_data_size
, -space
);
2661 case ARC_SPACE_META
:
2662 ARCSTAT_INCR(arcstat_metadata_size
, -space
);
2664 case ARC_SPACE_BONUS
:
2665 ARCSTAT_INCR(arcstat_bonus_size
, -space
);
2667 case ARC_SPACE_DNODE
:
2668 ARCSTAT_INCR(arcstat_dnode_size
, -space
);
2670 case ARC_SPACE_DBUF
:
2671 ARCSTAT_INCR(arcstat_dbuf_size
, -space
);
2673 case ARC_SPACE_HDRS
:
2674 ARCSTAT_INCR(arcstat_hdr_size
, -space
);
2676 case ARC_SPACE_L2HDRS
:
2677 aggsum_add(&arc_sums
.arcstat_l2_hdr_size
, -space
);
2679 case ARC_SPACE_ABD_CHUNK_WASTE
:
2680 ARCSTAT_INCR(arcstat_abd_chunk_waste_size
, -space
);
2684 if (type
!= ARC_SPACE_DATA
&& type
!= ARC_SPACE_ABD_CHUNK_WASTE
)
2685 ARCSTAT_INCR(arcstat_meta_used
, -space
);
2687 ASSERT(aggsum_compare(&arc_sums
.arcstat_size
, space
) >= 0);
2688 aggsum_add(&arc_sums
.arcstat_size
, -space
);
2692 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
2693 * with the hdr's b_pabd.
2696 arc_can_share(arc_buf_hdr_t
*hdr
, arc_buf_t
*buf
)
2699 * The criteria for sharing a hdr's data are:
2700 * 1. the buffer is not encrypted
2701 * 2. the hdr's compression matches the buf's compression
2702 * 3. the hdr doesn't need to be byteswapped
2703 * 4. the hdr isn't already being shared
2704 * 5. the buf is either compressed or it is the last buf in the hdr list
2706 * Criterion #5 maintains the invariant that shared uncompressed
2707 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
2708 * might ask, "if a compressed buf is allocated first, won't that be the
2709 * last thing in the list?", but in that case it's impossible to create
2710 * a shared uncompressed buf anyway (because the hdr must be compressed
2711 * to have the compressed buf). You might also think that #3 is
2712 * sufficient to make this guarantee, however it's possible
2713 * (specifically in the rare L2ARC write race mentioned in
2714 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
2715 * is shareable, but wasn't at the time of its allocation. Rather than
2716 * allow a new shared uncompressed buf to be created and then shuffle
2717 * the list around to make it the last element, this simply disallows
2718 * sharing if the new buf isn't the first to be added.
2720 ASSERT3P(buf
->b_hdr
, ==, hdr
);
2721 boolean_t hdr_compressed
=
2722 arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
;
2723 boolean_t buf_compressed
= ARC_BUF_COMPRESSED(buf
) != 0;
2724 return (!ARC_BUF_ENCRYPTED(buf
) &&
2725 buf_compressed
== hdr_compressed
&&
2726 hdr
->b_l1hdr
.b_byteswap
== DMU_BSWAP_NUMFUNCS
&&
2727 !HDR_SHARED_DATA(hdr
) &&
2728 (ARC_BUF_LAST(buf
) || ARC_BUF_COMPRESSED(buf
)));
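/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * in practice the check above means a freshly read, lz4-compressed hdr
 * can hand its b_pabd straight to a compressed buf, while an uncompressed
 * buf on that same hdr cannot share (its compression no longer matches
 * the hdr's), so it always gets its own allocation.
 */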
2732 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
2733 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
2734 * copy was made successfully, or an error code otherwise.
2737 arc_buf_alloc_impl(arc_buf_hdr_t
*hdr
, spa_t
*spa
, const zbookmark_phys_t
*zb
,
2738 const void *tag
, boolean_t encrypted
, boolean_t compressed
,
2739 boolean_t noauth
, boolean_t fill
, arc_buf_t
**ret
)
2742 arc_fill_flags_t flags
= ARC_FILL_LOCKED
;
2744 ASSERT(HDR_HAS_L1HDR(hdr
));
2745 ASSERT3U(HDR_GET_LSIZE(hdr
), >, 0);
2746 VERIFY(hdr
->b_type
== ARC_BUFC_DATA
||
2747 hdr
->b_type
== ARC_BUFC_METADATA
);
2748 ASSERT3P(ret
, !=, NULL
);
2749 ASSERT3P(*ret
, ==, NULL
);
2750 IMPLY(encrypted
, compressed
);
2752 buf
= *ret
= kmem_cache_alloc(buf_cache
, KM_PUSHPAGE
);
2755 buf
->b_next
= hdr
->b_l1hdr
.b_buf
;
2758 add_reference(hdr
, tag
);
2761 * We're about to change the hdr's b_flags. We must either
2762 * hold the hash_lock or be undiscoverable.
2764 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
2767 * Only honor requests for compressed bufs if the hdr is actually
2768 * compressed. This must be overridden if the buffer is encrypted since
2769 * encrypted buffers cannot be decompressed.
2772 buf
->b_flags
|= ARC_BUF_FLAG_COMPRESSED
;
2773 buf
->b_flags
|= ARC_BUF_FLAG_ENCRYPTED
;
2774 flags
|= ARC_FILL_COMPRESSED
| ARC_FILL_ENCRYPTED
;
2775 } else if (compressed
&&
2776 arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
) {
2777 buf
->b_flags
|= ARC_BUF_FLAG_COMPRESSED
;
2778 flags
|= ARC_FILL_COMPRESSED
;
2783 flags
|= ARC_FILL_NOAUTH
;
2787 * If the hdr's data can be shared then we share the data buffer and
2788 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
 * sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
2790 * buffer to store the buf's data.
2792 * There are two additional restrictions here because we're sharing
2793 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
2794 * actively involved in an L2ARC write, because if this buf is used by
2795 * an arc_write() then the hdr's data buffer will be released when the
2796 * write completes, even though the L2ARC write might still be using it.
2797 * Second, the hdr's ABD must be linear so that the buf's user doesn't
2798 * need to be ABD-aware. It must be allocated via
2799 * zio_[data_]buf_alloc(), not as a page, because we need to be able
2800 * to abd_release_ownership_of_buf(), which isn't allowed on "linear
2801 * page" buffers because the ABD code needs to handle freeing them
2804 boolean_t can_share
= arc_can_share(hdr
, buf
) &&
2805 !HDR_L2_WRITING(hdr
) &&
2806 hdr
->b_l1hdr
.b_pabd
!= NULL
&&
2807 abd_is_linear(hdr
->b_l1hdr
.b_pabd
) &&
2808 !abd_is_linear_page(hdr
->b_l1hdr
.b_pabd
);
2810 /* Set up b_data and sharing */
2812 buf
->b_data
= abd_to_buf(hdr
->b_l1hdr
.b_pabd
);
2813 buf
->b_flags
|= ARC_BUF_FLAG_SHARED
;
2814 arc_hdr_set_flags(hdr
, ARC_FLAG_SHARED_DATA
);
2817 arc_get_data_buf(hdr
, arc_buf_size(buf
), buf
);
2818 ARCSTAT_INCR(arcstat_overhead_size
, arc_buf_size(buf
));
2820 VERIFY3P(buf
->b_data
, !=, NULL
);
2822 hdr
->b_l1hdr
.b_buf
= buf
;
2825 * If the user wants the data from the hdr, we need to either copy or
2826 * decompress the data.
2829 ASSERT3P(zb
, !=, NULL
);
2830 return (arc_buf_fill(buf
, spa
, zb
, flags
));
2836 static const char *arc_onloan_tag
= "onloan";
2839 arc_loaned_bytes_update(int64_t delta
)
2841 atomic_add_64(&arc_loaned_bytes
, delta
);
2843 /* assert that it did not wrap around */
2844 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes
, 0), >=, 0);
2848 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
2849 * flight data by arc_tempreserve_space() until they are "returned". Loaned
2850 * buffers must be returned to the arc before they can be used by the DMU or
2854 arc_loan_buf(spa_t
*spa
, boolean_t is_metadata
, int size
)
2856 arc_buf_t
*buf
= arc_alloc_buf(spa
, arc_onloan_tag
,
2857 is_metadata
? ARC_BUFC_METADATA
: ARC_BUFC_DATA
, size
);
2859 arc_loaned_bytes_update(arc_buf_size(buf
));
2865 arc_loan_compressed_buf(spa_t
*spa
, uint64_t psize
, uint64_t lsize
,
2866 enum zio_compress compression_type
, uint8_t complevel
)
2868 arc_buf_t
*buf
= arc_alloc_compressed_buf(spa
, arc_onloan_tag
,
2869 psize
, lsize
, compression_type
, complevel
);
2871 arc_loaned_bytes_update(arc_buf_size(buf
));
2877 arc_loan_raw_buf(spa_t
*spa
, uint64_t dsobj
, boolean_t byteorder
,
2878 const uint8_t *salt
, const uint8_t *iv
, const uint8_t *mac
,
2879 dmu_object_type_t ot
, uint64_t psize
, uint64_t lsize
,
2880 enum zio_compress compression_type
, uint8_t complevel
)
2882 arc_buf_t
*buf
= arc_alloc_raw_buf(spa
, arc_onloan_tag
, dsobj
,
2883 byteorder
, salt
, iv
, mac
, ot
, psize
, lsize
, compression_type
,
2886 atomic_add_64(&arc_loaned_bytes
, psize
);
2892 * Return a loaned arc buffer to the arc.
2895 arc_return_buf(arc_buf_t
*buf
, const void *tag
)
2897 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
2899 ASSERT3P(buf
->b_data
, !=, NULL
);
2900 ASSERT(HDR_HAS_L1HDR(hdr
));
2901 (void) zfs_refcount_add(&hdr
->b_l1hdr
.b_refcnt
, tag
);
2902 (void) zfs_refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, arc_onloan_tag
);
2904 arc_loaned_bytes_update(-arc_buf_size(buf
));
2907 /* Detach an arc_buf from a dbuf (tag) */
2909 arc_loan_inuse_buf(arc_buf_t
*buf
, const void *tag
)
2911 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
2913 ASSERT3P(buf
->b_data
, !=, NULL
);
2914 ASSERT(HDR_HAS_L1HDR(hdr
));
2915 (void) zfs_refcount_add(&hdr
->b_l1hdr
.b_refcnt
, arc_onloan_tag
);
2916 (void) zfs_refcount_remove(&hdr
->b_l1hdr
.b_refcnt
, tag
);
2918 arc_loaned_bytes_update(arc_buf_size(buf
));
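/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * loans are expected to be symmetric; a consumer does roughly:
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, size);
 *	// ... fill abuf->b_data ...
 *	arc_return_buf(abuf, FTAG);
 *
 * arc_loaned_bytes tracks the outstanding total, which
 * arc_tempreserve_space() excludes from its in-flight accounting until
 * the buffers are returned.
 */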
2922 l2arc_free_abd_on_write(abd_t
*abd
, size_t size
, arc_buf_contents_t type
)
2924 l2arc_data_free_t
*df
= kmem_alloc(sizeof (*df
), KM_SLEEP
);
2927 df
->l2df_size
= size
;
2928 df
->l2df_type
= type
;
2929 mutex_enter(&l2arc_free_on_write_mtx
);
2930 list_insert_head(l2arc_free_on_write
, df
);
2931 mutex_exit(&l2arc_free_on_write_mtx
);
2935 arc_hdr_free_on_write(arc_buf_hdr_t
*hdr
, boolean_t free_rdata
)
2937 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
2938 arc_buf_contents_t type
= arc_buf_type(hdr
);
2939 uint64_t size
= (free_rdata
) ? HDR_GET_PSIZE(hdr
) : arc_hdr_size(hdr
);
2941 /* protected by hash lock, if in the hash table */
2942 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
2943 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
2944 ASSERT(state
!= arc_anon
&& state
!= arc_l2c_only
);
2946 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
2949 (void) zfs_refcount_remove_many(&state
->arcs_size
[type
], size
, hdr
);
2950 if (type
== ARC_BUFC_METADATA
) {
2951 arc_space_return(size
, ARC_SPACE_META
);
2953 ASSERT(type
== ARC_BUFC_DATA
);
2954 arc_space_return(size
, ARC_SPACE_DATA
);
2958 l2arc_free_abd_on_write(hdr
->b_crypt_hdr
.b_rabd
, size
, type
);
2960 l2arc_free_abd_on_write(hdr
->b_l1hdr
.b_pabd
, size
, type
);
2965 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
2966 * data buffer, we transfer the refcount ownership to the hdr and update
2967 * the appropriate kstats.
2970 arc_share_buf(arc_buf_hdr_t
*hdr
, arc_buf_t
*buf
)
2972 ASSERT(arc_can_share(hdr
, buf
));
2973 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
2974 ASSERT(!ARC_BUF_ENCRYPTED(buf
));
2975 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
2978 * Start sharing the data buffer. We transfer the
2979 * refcount ownership to the hdr since it always owns
2980 * the refcount whenever an arc_buf_t is shared.
2982 zfs_refcount_transfer_ownership_many(
2983 &hdr
->b_l1hdr
.b_state
->arcs_size
[arc_buf_type(hdr
)],
2984 arc_hdr_size(hdr
), buf
, hdr
);
2985 hdr
->b_l1hdr
.b_pabd
= abd_get_from_buf(buf
->b_data
, arc_buf_size(buf
));
2986 abd_take_ownership_of_buf(hdr
->b_l1hdr
.b_pabd
,
2987 HDR_ISTYPE_METADATA(hdr
));
2988 arc_hdr_set_flags(hdr
, ARC_FLAG_SHARED_DATA
);
2989 buf
->b_flags
|= ARC_BUF_FLAG_SHARED
;
2992 * Since we've transferred ownership to the hdr we need
2993 * to increment its compressed and uncompressed kstats and
2994 * decrement the overhead size.
2996 ARCSTAT_INCR(arcstat_compressed_size
, arc_hdr_size(hdr
));
2997 ARCSTAT_INCR(arcstat_uncompressed_size
, HDR_GET_LSIZE(hdr
));
2998 ARCSTAT_INCR(arcstat_overhead_size
, -arc_buf_size(buf
));
3002 arc_unshare_buf(arc_buf_hdr_t
*hdr
, arc_buf_t
*buf
)
3004 ASSERT(arc_buf_is_shared(buf
));
3005 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
3006 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
3009 * We are no longer sharing this buffer so we need
3010 * to transfer its ownership to the rightful owner.
3012 zfs_refcount_transfer_ownership_many(
3013 &hdr
->b_l1hdr
.b_state
->arcs_size
[arc_buf_type(hdr
)],
3014 arc_hdr_size(hdr
), hdr
, buf
);
3015 arc_hdr_clear_flags(hdr
, ARC_FLAG_SHARED_DATA
);
3016 abd_release_ownership_of_buf(hdr
->b_l1hdr
.b_pabd
);
3017 abd_free(hdr
->b_l1hdr
.b_pabd
);
3018 hdr
->b_l1hdr
.b_pabd
= NULL
;
3019 buf
->b_flags
&= ~ARC_BUF_FLAG_SHARED
;
3022 * Since the buffer is no longer shared between
3023 * the arc buf and the hdr, count it as overhead.
3025 ARCSTAT_INCR(arcstat_compressed_size
, -arc_hdr_size(hdr
));
3026 ARCSTAT_INCR(arcstat_uncompressed_size
, -HDR_GET_LSIZE(hdr
));
3027 ARCSTAT_INCR(arcstat_overhead_size
, arc_buf_size(buf
));
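/*
 * Illustrative note (editor's sketch, not part of the upstream source):
 * arc_share_buf() and arc_unshare_buf() above are exact mirrors: sharing
 * adds the hdr's size to the compressed/uncompressed kstat totals and
 * subtracts the buf's size from arcstat_overhead_size, and unsharing
 * reverses each of those adjustments, so the kstats stay balanced no
 * matter how often a buf flips between shared and private data.
 */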
3031 * Remove an arc_buf_t from the hdr's buf list and return the last
 * arc_buf_t on the list. If no buffers remain on the list then return
 * NULL.
3036 arc_buf_remove(arc_buf_hdr_t
*hdr
, arc_buf_t
*buf
)
3038 ASSERT(HDR_HAS_L1HDR(hdr
));
3039 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
3041 arc_buf_t
**bufp
= &hdr
->b_l1hdr
.b_buf
;
3042 arc_buf_t
*lastbuf
= NULL
;
3045 * Remove the buf from the hdr list and locate the last
3046 * remaining buffer on the list.
3048 while (*bufp
!= NULL
) {
3050 *bufp
= buf
->b_next
;
3053 * If we've removed a buffer in the middle of
 * the list then update the lastbuf and update
 * bufp appropriately.
3057 if (*bufp
!= NULL
) {
3059 bufp
= &(*bufp
)->b_next
;
3063 ASSERT3P(lastbuf
, !=, buf
);
3064 IMPLY(lastbuf
!= NULL
, ARC_BUF_LAST(lastbuf
));
3070 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
3074 arc_buf_destroy_impl(arc_buf_t
*buf
)
3076 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3079 * Free up the data associated with the buf but only if we're not
3080 * sharing this with the hdr. If we are sharing it with the hdr, the
3081 * hdr is responsible for doing the free.
3083 if (buf
->b_data
!= NULL
) {
3085 * We're about to change the hdr's b_flags. We must either
3086 * hold the hash_lock or be undiscoverable.
3088 ASSERT(HDR_EMPTY_OR_LOCKED(hdr
));
3090 arc_cksum_verify(buf
);
3091 arc_buf_unwatch(buf
);
3093 if (arc_buf_is_shared(buf
)) {
3094 arc_hdr_clear_flags(hdr
, ARC_FLAG_SHARED_DATA
);
3096 uint64_t size
= arc_buf_size(buf
);
3097 arc_free_data_buf(hdr
, buf
->b_data
, size
, buf
);
3098 ARCSTAT_INCR(arcstat_overhead_size
, -size
);
3103 * If we have no more encrypted buffers and we've already
3104 * gotten a copy of the decrypted data we can free b_rabd
3105 * to save some space.
3107 if (ARC_BUF_ENCRYPTED(buf
) && HDR_HAS_RABD(hdr
) &&
3108 hdr
->b_l1hdr
.b_pabd
!= NULL
&& !HDR_IO_IN_PROGRESS(hdr
)) {
3110 for (b
= hdr
->b_l1hdr
.b_buf
; b
; b
= b
->b_next
) {
3111 if (b
!= buf
&& ARC_BUF_ENCRYPTED(b
))
3115 arc_hdr_free_abd(hdr
, B_TRUE
);
3119 arc_buf_t
*lastbuf
= arc_buf_remove(hdr
, buf
);
3121 if (ARC_BUF_SHARED(buf
) && !ARC_BUF_COMPRESSED(buf
)) {
3123 * If the current arc_buf_t is sharing its data buffer with the
3124 * hdr, then reassign the hdr's b_pabd to share it with the new
3125 * buffer at the end of the list. The shared buffer is always
3126 * the last one on the hdr's buffer list.
3128 * There is an equivalent case for compressed bufs, but since
3129 * they aren't guaranteed to be the last buf in the list and
 * that is an exceedingly rare case, we just allow that space to be
3131 * wasted temporarily. We must also be careful not to share
3132 * encrypted buffers, since they cannot be shared.
3134 if (lastbuf
!= NULL
&& !ARC_BUF_ENCRYPTED(lastbuf
)) {
3135 /* Only one buf can be shared at once */
3136 VERIFY(!arc_buf_is_shared(lastbuf
));
3137 /* hdr is uncompressed so can't have compressed buf */
3138 VERIFY(!ARC_BUF_COMPRESSED(lastbuf
));
3140 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
3141 arc_hdr_free_abd(hdr
, B_FALSE
);
3144 * We must setup a new shared block between the
3145 * last buffer and the hdr. The data would have
3146 * been allocated by the arc buf so we need to transfer
3147 * ownership to the hdr since it's now being shared.
3149 arc_share_buf(hdr
, lastbuf
);
3151 } else if (HDR_SHARED_DATA(hdr
)) {
3153 * Uncompressed shared buffers are always at the end
3154 * of the list. Compressed buffers don't have the
3155 * same requirements. This makes it hard to
3156 * simply assert that the lastbuf is shared so
3157 * we rely on the hdr's compression flags to determine
3158 * if we have a compressed, shared buffer.
3160 ASSERT3P(lastbuf
, !=, NULL
);
3161 ASSERT(arc_buf_is_shared(lastbuf
) ||
3162 arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
);
 * Free the checksum if we're removing the last uncompressed buf from
 * this hdr.
3169 if (!arc_hdr_has_uncompressed_buf(hdr
)) {
3170 arc_cksum_free(hdr
);
3173 /* clean up the buf */
3175 kmem_cache_free(buf_cache
, buf
);
3179 arc_hdr_alloc_abd(arc_buf_hdr_t
*hdr
, int alloc_flags
)
3182 boolean_t alloc_rdata
= ((alloc_flags
& ARC_HDR_ALLOC_RDATA
) != 0);
3184 ASSERT3U(HDR_GET_LSIZE(hdr
), >, 0);
3185 ASSERT(HDR_HAS_L1HDR(hdr
));
3186 ASSERT(!HDR_SHARED_DATA(hdr
) || alloc_rdata
);
3187 IMPLY(alloc_rdata
, HDR_PROTECTED(hdr
));
3190 size
= HDR_GET_PSIZE(hdr
);
3191 ASSERT3P(hdr
->b_crypt_hdr
.b_rabd
, ==, NULL
);
3192 hdr
->b_crypt_hdr
.b_rabd
= arc_get_data_abd(hdr
, size
, hdr
,
3194 ASSERT3P(hdr
->b_crypt_hdr
.b_rabd
, !=, NULL
);
3195 ARCSTAT_INCR(arcstat_raw_size
, size
);
3197 size
= arc_hdr_size(hdr
);
3198 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
3199 hdr
->b_l1hdr
.b_pabd
= arc_get_data_abd(hdr
, size
, hdr
,
3201 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
3204 ARCSTAT_INCR(arcstat_compressed_size
, size
);
3205 ARCSTAT_INCR(arcstat_uncompressed_size
, HDR_GET_LSIZE(hdr
));
3209 arc_hdr_free_abd(arc_buf_hdr_t
*hdr
, boolean_t free_rdata
)
3211 uint64_t size
= (free_rdata
) ? HDR_GET_PSIZE(hdr
) : arc_hdr_size(hdr
);
3213 ASSERT(HDR_HAS_L1HDR(hdr
));
3214 ASSERT(hdr
->b_l1hdr
.b_pabd
!= NULL
|| HDR_HAS_RABD(hdr
));
3215 IMPLY(free_rdata
, HDR_HAS_RABD(hdr
));
3218 * If the hdr is currently being written to the l2arc then
3219 * we defer freeing the data by adding it to the l2arc_free_on_write
3220 * list. The l2arc will free the data once it's finished
3221 * writing it to the l2arc device.
3223 if (HDR_L2_WRITING(hdr
)) {
3224 arc_hdr_free_on_write(hdr
, free_rdata
);
3225 ARCSTAT_BUMP(arcstat_l2_free_on_write
);
3226 } else if (free_rdata
) {
3227 arc_free_data_abd(hdr
, hdr
->b_crypt_hdr
.b_rabd
, size
, hdr
);
3229 arc_free_data_abd(hdr
, hdr
->b_l1hdr
.b_pabd
, size
, hdr
);
3233 hdr
->b_crypt_hdr
.b_rabd
= NULL
;
3234 ARCSTAT_INCR(arcstat_raw_size
, -size
);
3236 hdr
->b_l1hdr
.b_pabd
= NULL
;
3239 if (hdr
->b_l1hdr
.b_pabd
== NULL
&& !HDR_HAS_RABD(hdr
))
3240 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_NUMFUNCS
;
3242 ARCSTAT_INCR(arcstat_compressed_size
, -size
);
3243 ARCSTAT_INCR(arcstat_uncompressed_size
, -HDR_GET_LSIZE(hdr
));
3247 * Allocate empty anonymous ARC header. The header will get its identity
3248 * assigned and buffers attached later as part of read or write operations.
3250 * In case of read arc_read() assigns header its identify (b_dva + b_birth),
3251 * inserts it into ARC hash to become globally visible and allocates physical
3252 * (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
3253 * completion arc_read_done() allocates ARC buffer(s) as needed, potentially
3254 * sharing one of them with the physical ABD buffer.
3256 * In case of write arc_alloc_buf() allocates ARC buffer to be filled with
3257 * data. Then after compression and/or encryption arc_write_ready() allocates
3258 * and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
3259 * buffer. On disk write completion arc_write_done() assigns the header its
3260 * new identity (b_dva + b_birth) and inserts into ARC hash.
3262 * In case of partial overwrite the old data is read first as described. Then
3263 * arc_release() either allocates new anonymous ARC header and moves the ARC
3264 * buffer to it, or reuses the old ARC header by discarding its identity and
3265 * removing it from ARC hash. After buffer modification normal write process
3266 * follows as described.
3268 static arc_buf_hdr_t
*
3269 arc_hdr_alloc(uint64_t spa
, int32_t psize
, int32_t lsize
,
3270 boolean_t
protected, enum zio_compress compression_type
, uint8_t complevel
,
3271 arc_buf_contents_t type
)
3275 VERIFY(type
== ARC_BUFC_DATA
|| type
== ARC_BUFC_METADATA
);
3277 hdr
= kmem_cache_alloc(hdr_full_crypt_cache
, KM_PUSHPAGE
);
3279 hdr
= kmem_cache_alloc(hdr_full_cache
, KM_PUSHPAGE
);
3282 ASSERT(HDR_EMPTY(hdr
));
3284 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
3286 HDR_SET_PSIZE(hdr
, psize
);
3287 HDR_SET_LSIZE(hdr
, lsize
);
3291 arc_hdr_set_flags(hdr
, arc_bufc_to_flags(type
) | ARC_FLAG_HAS_L1HDR
);
3292 arc_hdr_set_compress(hdr
, compression_type
);
3293 hdr
->b_complevel
= complevel
;
3295 arc_hdr_set_flags(hdr
, ARC_FLAG_PROTECTED
);
3297 hdr
->b_l1hdr
.b_state
= arc_anon
;
3298 hdr
->b_l1hdr
.b_arc_access
= 0;
3299 hdr
->b_l1hdr
.b_mru_hits
= 0;
3300 hdr
->b_l1hdr
.b_mru_ghost_hits
= 0;
3301 hdr
->b_l1hdr
.b_mfu_hits
= 0;
3302 hdr
->b_l1hdr
.b_mfu_ghost_hits
= 0;
3303 hdr
->b_l1hdr
.b_buf
= NULL
;
3305 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
3311 * Transition between the two allocation states for the arc_buf_hdr struct.
3312 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
3313 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * version is used when a cache buffer is only in the L2ARC in order to reduce
 * memory usage.
3317 static arc_buf_hdr_t
*
3318 arc_hdr_realloc(arc_buf_hdr_t
*hdr
, kmem_cache_t
*old
, kmem_cache_t
*new)
3320 ASSERT(HDR_HAS_L2HDR(hdr
));
3322 arc_buf_hdr_t
*nhdr
;
3323 l2arc_dev_t
*dev
= hdr
->b_l2hdr
.b_dev
;
3325 ASSERT((old
== hdr_full_cache
&& new == hdr_l2only_cache
) ||
3326 (old
== hdr_l2only_cache
&& new == hdr_full_cache
));
3329 * if the caller wanted a new full header and the header is to be
3330 * encrypted we will actually allocate the header from the full crypt
3331 * cache instead. The same applies to freeing from the old cache.
3333 if (HDR_PROTECTED(hdr
) && new == hdr_full_cache
)
3334 new = hdr_full_crypt_cache
;
3335 if (HDR_PROTECTED(hdr
) && old
== hdr_full_cache
)
3336 old
= hdr_full_crypt_cache
;
3338 nhdr
= kmem_cache_alloc(new, KM_PUSHPAGE
);
3340 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
3341 buf_hash_remove(hdr
);
3343 memcpy(nhdr
, hdr
, HDR_L2ONLY_SIZE
);
3345 if (new == hdr_full_cache
|| new == hdr_full_crypt_cache
) {
3346 arc_hdr_set_flags(nhdr
, ARC_FLAG_HAS_L1HDR
);
3348 * arc_access and arc_change_state need to be aware that a
3349 * header has just come out of L2ARC, so we set its state to
3350 * l2c_only even though it's about to change.
3352 nhdr
->b_l1hdr
.b_state
= arc_l2c_only
;
3354 /* Verify previous threads set to NULL before freeing */
3355 ASSERT3P(nhdr
->b_l1hdr
.b_pabd
, ==, NULL
);
3356 ASSERT(!HDR_HAS_RABD(hdr
));
3358 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
3360 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
 * If we've reached here, we must have been called from
3365 * arc_evict_hdr(), as such we should have already been
3366 * removed from any ghost list we were previously on
3367 * (which protects us from racing with arc_evict_state),
3368 * thus no locking is needed during this check.
3370 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
3373 * A buffer must not be moved into the arc_l2c_only
3374 * state if it's not finished being written out to the
3375 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
3376 * might try to be accessed, even though it was removed.
3378 VERIFY(!HDR_L2_WRITING(hdr
));
3379 VERIFY3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
3380 ASSERT(!HDR_HAS_RABD(hdr
));
3382 arc_hdr_clear_flags(nhdr
, ARC_FLAG_HAS_L1HDR
);
3385 * The header has been reallocated so we need to re-insert it into any
3388 (void) buf_hash_insert(nhdr
, NULL
);
3390 ASSERT(list_link_active(&hdr
->b_l2hdr
.b_l2node
));
3392 mutex_enter(&dev
->l2ad_mtx
);
3395 * We must place the realloc'ed header back into the list at
3396 * the same spot. Otherwise, if it's placed earlier in the list,
3397 * l2arc_write_buffers() could find it during the function's
3398 * write phase, and try to write it out to the l2arc.
3400 list_insert_after(&dev
->l2ad_buflist
, hdr
, nhdr
);
3401 list_remove(&dev
->l2ad_buflist
, hdr
);
3403 mutex_exit(&dev
->l2ad_mtx
);
3406 * Since we're using the pointer address as the tag when
3407 * incrementing and decrementing the l2ad_alloc refcount, we
3408 * must remove the old pointer (that we're about to destroy) and
3409 * add the new pointer to the refcount. Otherwise we'd remove
3410 * the wrong pointer address when calling arc_hdr_destroy() later.
3413 (void) zfs_refcount_remove_many(&dev
->l2ad_alloc
,
3414 arc_hdr_size(hdr
), hdr
);
3415 (void) zfs_refcount_add_many(&dev
->l2ad_alloc
,
3416 arc_hdr_size(nhdr
), nhdr
);
3418 buf_discard_identity(hdr
);
3419 kmem_cache_free(old
, hdr
);
3425 * This function allows an L1 header to be reallocated as a crypt
3426 * header and vice versa. If we are going to a crypt header, the
3427 * new fields will be zeroed out.
3429 static arc_buf_hdr_t
*
3430 arc_hdr_realloc_crypt(arc_buf_hdr_t
*hdr
, boolean_t need_crypt
)
3432 arc_buf_hdr_t
*nhdr
;
3434 kmem_cache_t
*ncache
, *ocache
;
3437 * This function requires that hdr is in the arc_anon state.
3438 * Therefore it won't have any L2ARC data for us to worry
3441 ASSERT(HDR_HAS_L1HDR(hdr
));
3442 ASSERT(!HDR_HAS_L2HDR(hdr
));
3443 ASSERT3U(!!HDR_PROTECTED(hdr
), !=, need_crypt
);
3444 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
3445 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
3446 ASSERT(!list_link_active(&hdr
->b_l2hdr
.b_l2node
));
3447 ASSERT3P(hdr
->b_hash_next
, ==, NULL
);
3450 ncache
= hdr_full_crypt_cache
;
3451 ocache
= hdr_full_cache
;
3453 ncache
= hdr_full_cache
;
3454 ocache
= hdr_full_crypt_cache
;
3457 nhdr
= kmem_cache_alloc(ncache
, KM_PUSHPAGE
);
3460 * Copy all members that aren't locks or condvars to the new header.
3461 * No lists are pointing to us (as we asserted above), so we don't
3462 * need to worry about the list nodes.
3464 nhdr
->b_dva
= hdr
->b_dva
;
3465 nhdr
->b_birth
= hdr
->b_birth
;
3466 nhdr
->b_type
= hdr
->b_type
;
3467 nhdr
->b_flags
= hdr
->b_flags
;
3468 nhdr
->b_psize
= hdr
->b_psize
;
3469 nhdr
->b_lsize
= hdr
->b_lsize
;
3470 nhdr
->b_spa
= hdr
->b_spa
;
3472 nhdr
->b_l1hdr
.b_freeze_cksum
= hdr
->b_l1hdr
.b_freeze_cksum
;
3474 nhdr
->b_l1hdr
.b_byteswap
= hdr
->b_l1hdr
.b_byteswap
;
3475 nhdr
->b_l1hdr
.b_state
= hdr
->b_l1hdr
.b_state
;
3476 nhdr
->b_l1hdr
.b_arc_access
= hdr
->b_l1hdr
.b_arc_access
;
3477 nhdr
->b_l1hdr
.b_mru_hits
= hdr
->b_l1hdr
.b_mru_hits
;
3478 nhdr
->b_l1hdr
.b_mru_ghost_hits
= hdr
->b_l1hdr
.b_mru_ghost_hits
;
3479 nhdr
->b_l1hdr
.b_mfu_hits
= hdr
->b_l1hdr
.b_mfu_hits
;
3480 nhdr
->b_l1hdr
.b_mfu_ghost_hits
= hdr
->b_l1hdr
.b_mfu_ghost_hits
;
3481 nhdr
->b_l1hdr
.b_acb
= hdr
->b_l1hdr
.b_acb
;
3482 nhdr
->b_l1hdr
.b_pabd
= hdr
->b_l1hdr
.b_pabd
;
3485 * This zfs_refcount_add() exists only to ensure that the individual
3486 * arc buffers always point to a header that is referenced, avoiding
3487 * a small race condition that could trigger ASSERTs.
3489 (void) zfs_refcount_add(&nhdr
->b_l1hdr
.b_refcnt
, FTAG
);
3490 nhdr
->b_l1hdr
.b_buf
= hdr
->b_l1hdr
.b_buf
;
3491 for (buf
= nhdr
->b_l1hdr
.b_buf
; buf
!= NULL
; buf
= buf
->b_next
)
3494 zfs_refcount_transfer(&nhdr
->b_l1hdr
.b_refcnt
, &hdr
->b_l1hdr
.b_refcnt
);
3495 (void) zfs_refcount_remove(&nhdr
->b_l1hdr
.b_refcnt
, FTAG
);
3496 ASSERT0(zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
));
3499 arc_hdr_set_flags(nhdr
, ARC_FLAG_PROTECTED
);
3501 arc_hdr_clear_flags(nhdr
, ARC_FLAG_PROTECTED
);
3504 /* unset all members of the original hdr */
3505 memset(&hdr
->b_dva
, 0, sizeof (dva_t
));
3513 hdr
->b_l1hdr
.b_freeze_cksum
= NULL
;
3515 hdr
->b_l1hdr
.b_buf
= NULL
;
3516 hdr
->b_l1hdr
.b_byteswap
= 0;
3517 hdr
->b_l1hdr
.b_state
= NULL
;
3518 hdr
->b_l1hdr
.b_arc_access
= 0;
3519 hdr
->b_l1hdr
.b_mru_hits
= 0;
3520 hdr
->b_l1hdr
.b_mru_ghost_hits
= 0;
3521 hdr
->b_l1hdr
.b_mfu_hits
= 0;
3522 hdr
->b_l1hdr
.b_mfu_ghost_hits
= 0;
3523 hdr
->b_l1hdr
.b_acb
= NULL
;
3524 hdr
->b_l1hdr
.b_pabd
= NULL
;
3526 if (ocache
== hdr_full_crypt_cache
) {
3527 ASSERT(!HDR_HAS_RABD(hdr
));
3528 hdr
->b_crypt_hdr
.b_ot
= DMU_OT_NONE
;
3529 hdr
->b_crypt_hdr
.b_dsobj
= 0;
3530 memset(hdr
->b_crypt_hdr
.b_salt
, 0, ZIO_DATA_SALT_LEN
);
3531 memset(hdr
->b_crypt_hdr
.b_iv
, 0, ZIO_DATA_IV_LEN
);
3532 memset(hdr
->b_crypt_hdr
.b_mac
, 0, ZIO_DATA_MAC_LEN
);
3535 buf_discard_identity(hdr
);
3536 kmem_cache_free(ocache
, hdr
);
3542 * This function is used by the send / receive code to convert a newly
3543 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
3544 * is also used to allow the root objset block to be updated without altering
3545 * its embedded MACs. Both block types will always be uncompressed so we do not
3546 * have to worry about compression type or psize.
3549 arc_convert_to_raw(arc_buf_t
*buf
, uint64_t dsobj
, boolean_t byteorder
,
3550 dmu_object_type_t ot
, const uint8_t *salt
, const uint8_t *iv
,
3553 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3555 ASSERT(ot
== DMU_OT_DNODE
|| ot
== DMU_OT_OBJSET
);
3556 ASSERT(HDR_HAS_L1HDR(hdr
));
3557 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
3559 buf
->b_flags
|= (ARC_BUF_FLAG_COMPRESSED
| ARC_BUF_FLAG_ENCRYPTED
);
3560 if (!HDR_PROTECTED(hdr
))
3561 hdr
= arc_hdr_realloc_crypt(hdr
, B_TRUE
);
3562 hdr
->b_crypt_hdr
.b_dsobj
= dsobj
;
3563 hdr
->b_crypt_hdr
.b_ot
= ot
;
3564 hdr
->b_l1hdr
.b_byteswap
= (byteorder
== ZFS_HOST_BYTEORDER
) ?
3565 DMU_BSWAP_NUMFUNCS
: DMU_OT_BYTESWAP(ot
);
3566 if (!arc_hdr_has_uncompressed_buf(hdr
))
3567 arc_cksum_free(hdr
);
3570 memcpy(hdr
->b_crypt_hdr
.b_salt
, salt
, ZIO_DATA_SALT_LEN
);
3572 memcpy(hdr
->b_crypt_hdr
.b_iv
, iv
, ZIO_DATA_IV_LEN
);
3574 memcpy(hdr
->b_crypt_hdr
.b_mac
, mac
, ZIO_DATA_MAC_LEN
);
3578 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
3579 * The buf is returned thawed since we expect the consumer to modify it.
3582 arc_alloc_buf(spa_t
*spa
, const void *tag
, arc_buf_contents_t type
,
3585 arc_buf_hdr_t
*hdr
= arc_hdr_alloc(spa_load_guid(spa
), size
, size
,
3586 B_FALSE
, ZIO_COMPRESS_OFF
, 0, type
);
3588 arc_buf_t
*buf
= NULL
;
3589 VERIFY0(arc_buf_alloc_impl(hdr
, spa
, NULL
, tag
, B_FALSE
, B_FALSE
,
3590 B_FALSE
, B_FALSE
, &buf
));
3597 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
3598 * for bufs containing metadata.
3601 arc_alloc_compressed_buf(spa_t
*spa
, const void *tag
, uint64_t psize
,
3602 uint64_t lsize
, enum zio_compress compression_type
, uint8_t complevel
)
3604 ASSERT3U(lsize
, >, 0);
3605 ASSERT3U(lsize
, >=, psize
);
3606 ASSERT3U(compression_type
, >, ZIO_COMPRESS_OFF
);
3607 ASSERT3U(compression_type
, <, ZIO_COMPRESS_FUNCTIONS
);
3609 arc_buf_hdr_t
*hdr
= arc_hdr_alloc(spa_load_guid(spa
), psize
, lsize
,
3610 B_FALSE
, compression_type
, complevel
, ARC_BUFC_DATA
);
3612 arc_buf_t
*buf
= NULL
;
3613 VERIFY0(arc_buf_alloc_impl(hdr
, spa
, NULL
, tag
, B_FALSE
,
3614 B_TRUE
, B_FALSE
, B_FALSE
, &buf
));
3618 * To ensure that the hdr has the correct data in it if we call
3619 * arc_untransform() on this buf before it's been written to disk,
3620 * it's easiest if we just set up sharing between the buf and the hdr.
3622 arc_share_buf(hdr
, buf
);
3628 arc_alloc_raw_buf(spa_t
*spa
, const void *tag
, uint64_t dsobj
,
3629 boolean_t byteorder
, const uint8_t *salt
, const uint8_t *iv
,
3630 const uint8_t *mac
, dmu_object_type_t ot
, uint64_t psize
, uint64_t lsize
,
3631 enum zio_compress compression_type
, uint8_t complevel
)
3635 arc_buf_contents_t type
= DMU_OT_IS_METADATA(ot
) ?
3636 ARC_BUFC_METADATA
: ARC_BUFC_DATA
;
3638 ASSERT3U(lsize
, >, 0);
3639 ASSERT3U(lsize
, >=, psize
);
3640 ASSERT3U(compression_type
, >=, ZIO_COMPRESS_OFF
);
3641 ASSERT3U(compression_type
, <, ZIO_COMPRESS_FUNCTIONS
);
3643 hdr
= arc_hdr_alloc(spa_load_guid(spa
), psize
, lsize
, B_TRUE
,
3644 compression_type
, complevel
, type
);
3646 hdr
->b_crypt_hdr
.b_dsobj
= dsobj
;
3647 hdr
->b_crypt_hdr
.b_ot
= ot
;
3648 hdr
->b_l1hdr
.b_byteswap
= (byteorder
== ZFS_HOST_BYTEORDER
) ?
3649 DMU_BSWAP_NUMFUNCS
: DMU_OT_BYTESWAP(ot
);
3650 memcpy(hdr
->b_crypt_hdr
.b_salt
, salt
, ZIO_DATA_SALT_LEN
);
3651 memcpy(hdr
->b_crypt_hdr
.b_iv
, iv
, ZIO_DATA_IV_LEN
);
3652 memcpy(hdr
->b_crypt_hdr
.b_mac
, mac
, ZIO_DATA_MAC_LEN
);
3655 * This buffer will be considered encrypted even if the ot is not an
3656 * encrypted type. It will become authenticated instead in
3657 * arc_write_ready().
3660 VERIFY0(arc_buf_alloc_impl(hdr
, spa
, NULL
, tag
, B_TRUE
, B_TRUE
,
3661 B_FALSE
, B_FALSE
, &buf
));
3668 l2arc_hdr_arcstats_update(arc_buf_hdr_t
*hdr
, boolean_t incr
,
3669 boolean_t state_only
)
3671 l2arc_buf_hdr_t
*l2hdr
= &hdr
->b_l2hdr
;
3672 l2arc_dev_t
*dev
= l2hdr
->b_dev
;
3673 uint64_t lsize
= HDR_GET_LSIZE(hdr
);
3674 uint64_t psize
= HDR_GET_PSIZE(hdr
);
3675 uint64_t asize
= vdev_psize_to_asize(dev
->l2ad_vdev
, psize
);
3676 arc_buf_contents_t type
= hdr
->b_type
;
3691 /* If the buffer is a prefetch, count it as such. */
3692 if (HDR_PREFETCH(hdr
)) {
3693 ARCSTAT_INCR(arcstat_l2_prefetch_asize
, asize_s
);
3696 * We use the value stored in the L2 header upon initial
3697 * caching in L2ARC. This value will be updated in case
3698 * an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
3699 * metadata (log entry) cannot currently be updated. Having
3700 * the ARC state in the L2 header solves the problem of a
3701 * possibly absent L1 header (apparent in buffers restored
3702 * from persistent L2ARC).
3704 switch (hdr
->b_l2hdr
.b_arcs_state
) {
3705 case ARC_STATE_MRU_GHOST
:
3707 ARCSTAT_INCR(arcstat_l2_mru_asize
, asize_s
);
3709 case ARC_STATE_MFU_GHOST
:
3711 ARCSTAT_INCR(arcstat_l2_mfu_asize
, asize_s
);
3721 ARCSTAT_INCR(arcstat_l2_psize
, psize_s
);
3722 ARCSTAT_INCR(arcstat_l2_lsize
, lsize_s
);
3726 ARCSTAT_INCR(arcstat_l2_bufc_data_asize
, asize_s
);
3728 case ARC_BUFC_METADATA
:
3729 ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize
, asize_s
);
3738 arc_hdr_l2hdr_destroy(arc_buf_hdr_t
*hdr
)
3740 l2arc_buf_hdr_t
*l2hdr
= &hdr
->b_l2hdr
;
3741 l2arc_dev_t
*dev
= l2hdr
->b_dev
;
3742 uint64_t psize
= HDR_GET_PSIZE(hdr
);
3743 uint64_t asize
= vdev_psize_to_asize(dev
->l2ad_vdev
, psize
);
3745 ASSERT(MUTEX_HELD(&dev
->l2ad_mtx
));
3746 ASSERT(HDR_HAS_L2HDR(hdr
));
3748 list_remove(&dev
->l2ad_buflist
, hdr
);
3750 l2arc_hdr_arcstats_decrement(hdr
);
3751 vdev_space_update(dev
->l2ad_vdev
, -asize
, 0, 0);
3753 (void) zfs_refcount_remove_many(&dev
->l2ad_alloc
, arc_hdr_size(hdr
),
3755 arc_hdr_clear_flags(hdr
, ARC_FLAG_HAS_L2HDR
);
3759 arc_hdr_destroy(arc_buf_hdr_t
*hdr
)
3761 if (HDR_HAS_L1HDR(hdr
)) {
3762 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
3763 ASSERT3P(hdr
->b_l1hdr
.b_state
, ==, arc_anon
);
3765 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3766 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
3768 if (HDR_HAS_L2HDR(hdr
)) {
3769 l2arc_dev_t
*dev
= hdr
->b_l2hdr
.b_dev
;
3770 boolean_t buflist_held
= MUTEX_HELD(&dev
->l2ad_mtx
);
3773 mutex_enter(&dev
->l2ad_mtx
);
3776 * Even though we checked this conditional above, we
3777 * need to check this again now that we have the
3778 * l2ad_mtx. This is because we could be racing with
3779 * another thread calling l2arc_evict() which might have
3780 * destroyed this header's L2 portion as we were waiting
3781 * to acquire the l2ad_mtx. If that happens, we don't
3782 * want to re-destroy the header's L2 portion.
3784 if (HDR_HAS_L2HDR(hdr
)) {
3786 if (!HDR_EMPTY(hdr
))
3787 buf_discard_identity(hdr
);
3789 arc_hdr_l2hdr_destroy(hdr
);
3793 mutex_exit(&dev
->l2ad_mtx
);
3797 * The header's identify can only be safely discarded once it is no
3798 * longer discoverable. This requires removing it from the hash table
3799 * and the l2arc header list. After this point the hash lock can not
3800 * be used to protect the header.
3802 if (!HDR_EMPTY(hdr
))
3803 buf_discard_identity(hdr
);
3805 if (HDR_HAS_L1HDR(hdr
)) {
3806 arc_cksum_free(hdr
);
3808 while (hdr
->b_l1hdr
.b_buf
!= NULL
)
3809 arc_buf_destroy_impl(hdr
->b_l1hdr
.b_buf
);
3811 if (hdr
->b_l1hdr
.b_pabd
!= NULL
)
3812 arc_hdr_free_abd(hdr
, B_FALSE
);
3814 if (HDR_HAS_RABD(hdr
))
3815 arc_hdr_free_abd(hdr
, B_TRUE
);
3818 ASSERT3P(hdr
->b_hash_next
, ==, NULL
);
3819 if (HDR_HAS_L1HDR(hdr
)) {
3820 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
3821 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
3823 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
3826 if (!HDR_PROTECTED(hdr
)) {
3827 kmem_cache_free(hdr_full_cache
, hdr
);
3829 kmem_cache_free(hdr_full_crypt_cache
, hdr
);
3832 kmem_cache_free(hdr_l2only_cache
, hdr
);
3837 arc_buf_destroy(arc_buf_t
*buf
, const void *tag
)
3839 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
3841 if (hdr
->b_l1hdr
.b_state
== arc_anon
) {
3842 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, buf
);
3843 ASSERT(ARC_BUF_LAST(buf
));
3844 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3845 VERIFY0(remove_reference(hdr
, tag
));
3849 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
3850 mutex_enter(hash_lock
);
3852 ASSERT3P(hdr
, ==, buf
->b_hdr
);
3853 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, NULL
);
3854 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
3855 ASSERT3P(hdr
->b_l1hdr
.b_state
, !=, arc_anon
);
3856 ASSERT3P(buf
->b_data
, !=, NULL
);
3858 arc_buf_destroy_impl(buf
);
3859 (void) remove_reference(hdr
, tag
);
3860 mutex_exit(hash_lock
);
3864 * Evict the arc_buf_hdr that is provided as a parameter. The resultant
3865 * state of the header is dependent on its state prior to entering this
3866 * function. The following transitions are possible:
3868 * - arc_mru -> arc_mru_ghost
3869 * - arc_mfu -> arc_mfu_ghost
3870 * - arc_mru_ghost -> arc_l2c_only
3871 * - arc_mru_ghost -> deleted
3872 * - arc_mfu_ghost -> arc_l2c_only
3873 * - arc_mfu_ghost -> deleted
3874 * - arc_uncached -> deleted
3876 * Return total size of evicted data buffers for eviction progress tracking.
3877 * When evicting from ghost states return logical buffer size to make eviction
3878 * progress at the same (or at least comparable) rate as from non-ghost states.
3880 * Return *real_evicted for actual ARC size reduction to wake up threads
3881 * waiting for it. For non-ghost states it includes size of evicted data
3882 * buffers (the headers are not freed there). For ghost states it includes
3883 * only the evicted headers size.
3886 arc_evict_hdr(arc_buf_hdr_t
*hdr
, uint64_t *real_evicted
)
3888 arc_state_t
*evicted_state
, *state
;
3889 int64_t bytes_evicted
= 0;
3890 uint_t min_lifetime
= HDR_PRESCIENT_PREFETCH(hdr
) ?
3891 arc_min_prescient_prefetch_ms
: arc_min_prefetch_ms
;
3893 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
3894 ASSERT(HDR_HAS_L1HDR(hdr
));
3895 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
3896 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
3897 ASSERT0(zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
));
3900 state
= hdr
->b_l1hdr
.b_state
;
3901 if (GHOST_STATE(state
)) {
3904 * l2arc_write_buffers() relies on a header's L1 portion
3905 * (i.e. its b_pabd field) during it's write phase.
3906 * Thus, we cannot push a header onto the arc_l2c_only
3907 * state (removing its L1 piece) until the header is
3908 * done being written to the l2arc.
3910 if (HDR_HAS_L2HDR(hdr
) && HDR_L2_WRITING(hdr
)) {
3911 ARCSTAT_BUMP(arcstat_evict_l2_skip
);
3912 return (bytes_evicted
);
3915 ARCSTAT_BUMP(arcstat_deleted
);
3916 bytes_evicted
+= HDR_GET_LSIZE(hdr
);
3918 DTRACE_PROBE1(arc__delete
, arc_buf_hdr_t
*, hdr
);
3920 if (HDR_HAS_L2HDR(hdr
)) {
3921 ASSERT(hdr
->b_l1hdr
.b_pabd
== NULL
);
3922 ASSERT(!HDR_HAS_RABD(hdr
));
3924 * This buffer is cached on the 2nd Level ARC;
3925 * don't destroy the header.
3927 arc_change_state(arc_l2c_only
, hdr
);
3929 * dropping from L1+L2 cached to L2-only,
3930 * realloc to remove the L1 header.
3932 (void) arc_hdr_realloc(hdr
, hdr_full_cache
,
3934 *real_evicted
+= HDR_FULL_SIZE
- HDR_L2ONLY_SIZE
;
3936 arc_change_state(arc_anon
, hdr
);
3937 arc_hdr_destroy(hdr
);
3938 *real_evicted
+= HDR_FULL_SIZE
;
3940 return (bytes_evicted
);
3943 ASSERT(state
== arc_mru
|| state
== arc_mfu
|| state
== arc_uncached
);
3944 evicted_state
= (state
== arc_uncached
) ? arc_anon
:
3945 ((state
== arc_mru
) ? arc_mru_ghost
: arc_mfu_ghost
);
3947 /* prefetch buffers have a minimum lifespan */
3948 if ((hdr
->b_flags
& (ARC_FLAG_PREFETCH
| ARC_FLAG_INDIRECT
)) &&
3949 ddi_get_lbolt() - hdr
->b_l1hdr
.b_arc_access
<
3950 MSEC_TO_TICK(min_lifetime
)) {
3951 ARCSTAT_BUMP(arcstat_evict_skip
);
3952 return (bytes_evicted
);
3955 if (HDR_HAS_L2HDR(hdr
)) {
3956 ARCSTAT_INCR(arcstat_evict_l2_cached
, HDR_GET_LSIZE(hdr
));
3958 if (l2arc_write_eligible(hdr
->b_spa
, hdr
)) {
3959 ARCSTAT_INCR(arcstat_evict_l2_eligible
,
3960 HDR_GET_LSIZE(hdr
));
3962 switch (state
->arcs_state
) {
3965 arcstat_evict_l2_eligible_mru
,
3966 HDR_GET_LSIZE(hdr
));
3970 arcstat_evict_l2_eligible_mfu
,
3971 HDR_GET_LSIZE(hdr
));
3977 ARCSTAT_INCR(arcstat_evict_l2_ineligible
,
3978 HDR_GET_LSIZE(hdr
));
3982 bytes_evicted
+= arc_hdr_size(hdr
);
3983 *real_evicted
+= arc_hdr_size(hdr
);
3986 * If this hdr is being evicted and has a compressed buffer then we
3987 * discard it here before we change states. This ensures that the
3988 * accounting is updated correctly in arc_free_data_impl().
3990 if (hdr
->b_l1hdr
.b_pabd
!= NULL
)
3991 arc_hdr_free_abd(hdr
, B_FALSE
);
3993 if (HDR_HAS_RABD(hdr
))
3994 arc_hdr_free_abd(hdr
, B_TRUE
);
3996 arc_change_state(evicted_state
, hdr
);
3997 DTRACE_PROBE1(arc__evict
, arc_buf_hdr_t
*, hdr
);
3998 if (evicted_state
== arc_anon
) {
3999 arc_hdr_destroy(hdr
);
4000 *real_evicted
+= HDR_FULL_SIZE
;
4002 ASSERT(HDR_IN_HASH_TABLE(hdr
));
4005 return (bytes_evicted
);
4009 arc_set_need_free(void)
4011 ASSERT(MUTEX_HELD(&arc_evict_lock
));
4012 int64_t remaining
= arc_free_memory() - arc_sys_free
/ 2;
4013 arc_evict_waiter_t
*aw
= list_tail(&arc_evict_waiters
);
4015 arc_need_free
= MAX(-remaining
, 0);
4018 MAX(-remaining
, (int64_t)(aw
->aew_count
- arc_evict_count
));
4023 arc_evict_state_impl(multilist_t
*ml
, int idx
, arc_buf_hdr_t
*marker
,
4024 uint64_t spa
, uint64_t bytes
)
4026 multilist_sublist_t
*mls
;
4027 uint64_t bytes_evicted
= 0, real_evicted
= 0;
4029 kmutex_t
*hash_lock
;
4030 uint_t evict_count
= zfs_arc_evict_batch_limit
;
4032 ASSERT3P(marker
, !=, NULL
);
4034 mls
= multilist_sublist_lock(ml
, idx
);
4036 for (hdr
= multilist_sublist_prev(mls
, marker
); likely(hdr
!= NULL
);
4037 hdr
= multilist_sublist_prev(mls
, marker
)) {
4038 if ((evict_count
== 0) || (bytes_evicted
>= bytes
))
4042 * To keep our iteration location, move the marker
4043 * forward. Since we're not holding hdr's hash lock, we
4044 * must be very careful and not remove 'hdr' from the
4045 * sublist. Otherwise, other consumers might mistake the
4046 * 'hdr' as not being on a sublist when they call the
4047 * multilist_link_active() function (they all rely on
4048 * the hash lock protecting concurrent insertions and
4049 * removals). multilist_sublist_move_forward() was
4050 * specifically implemented to ensure this is the case
4051 * (only 'marker' will be removed and re-inserted).
4053 multilist_sublist_move_forward(mls
, marker
);
4056 * The only case where the b_spa field should ever be
4057 * zero, is the marker headers inserted by
4058 * arc_evict_state(). It's possible for multiple threads
4059 * to be calling arc_evict_state() concurrently (e.g.
4060 * dsl_pool_close() and zio_inject_fault()), so we must
4061 * skip any markers we see from these other threads.
4063 if (hdr
->b_spa
== 0)
4066 /* we're only interested in evicting buffers of a certain spa */
4067 if (spa
!= 0 && hdr
->b_spa
!= spa
) {
4068 ARCSTAT_BUMP(arcstat_evict_skip
);
4072 hash_lock
= HDR_LOCK(hdr
);
4075 * We aren't calling this function from any code path
4076 * that would already be holding a hash lock, so we're
4077 * asserting on this assumption to be defensive in case
4078 * this ever changes. Without this check, it would be
4079 * possible to incorrectly increment arcstat_mutex_miss
4080 * below (e.g. if the code changed such that we called
4081 * this function with a hash lock held).
4083 ASSERT(!MUTEX_HELD(hash_lock
));
4085 if (mutex_tryenter(hash_lock
)) {
4087 uint64_t evicted
= arc_evict_hdr(hdr
, &revicted
);
4088 mutex_exit(hash_lock
);
4090 bytes_evicted
+= evicted
;
4091 real_evicted
+= revicted
;
4094 * If evicted is zero, arc_evict_hdr() must have
4095 * decided to skip this header, don't increment
4096 * evict_count in this case.
4102 ARCSTAT_BUMP(arcstat_mutex_miss
);
4106 multilist_sublist_unlock(mls
);
4109 * Increment the count of evicted bytes, and wake up any threads that
4110 * are waiting for the count to reach this value. Since the list is
4111 * ordered by ascending aew_count, we pop off the beginning of the
4112 * list until we reach the end, or a waiter that's past the current
4113 * "count". Doing this outside the loop reduces the number of times
4114 * we need to acquire the global arc_evict_lock.
4116 * Only wake when there's sufficient free memory in the system
4117 * (specifically, arc_sys_free/2, which by default is a bit more than
4118 * 1/64th of RAM). See the comments in arc_wait_for_eviction().
4120 mutex_enter(&arc_evict_lock
);
4121 arc_evict_count
+= real_evicted
;
4123 if (arc_free_memory() > arc_sys_free
/ 2) {
4124 arc_evict_waiter_t
*aw
;
4125 while ((aw
= list_head(&arc_evict_waiters
)) != NULL
&&
4126 aw
->aew_count
<= arc_evict_count
) {
4127 list_remove(&arc_evict_waiters
, aw
);
4128 cv_broadcast(&aw
->aew_cv
);
4131 arc_set_need_free();
4132 mutex_exit(&arc_evict_lock
);
4135 * If the ARC size is reduced from arc_c_max to arc_c_min (especially
4136 * if the average cached block is small), eviction can be on-CPU for
4137 * many seconds. To ensure that other threads that may be bound to
4138 * this CPU are able to make progress, make a voluntary preemption
4141 kpreempt(KPREEMPT_SYNC
);
4143 return (bytes_evicted
);
4147 * Allocate an array of buffer headers used as placeholders during arc state
4150 static arc_buf_hdr_t
**
4151 arc_state_alloc_markers(int count
)
4153 arc_buf_hdr_t
**markers
;
4155 markers
= kmem_zalloc(sizeof (*markers
) * count
, KM_SLEEP
);
4156 for (int i
= 0; i
< count
; i
++) {
4157 markers
[i
] = kmem_cache_alloc(hdr_full_cache
, KM_SLEEP
);
4160 * A b_spa of 0 is used to indicate that this header is
4161 * a marker. This fact is used in arc_evict_state_impl().
4163 markers
[i
]->b_spa
= 0;
4170 arc_state_free_markers(arc_buf_hdr_t
**markers
, int count
)
4172 for (int i
= 0; i
< count
; i
++)
4173 kmem_cache_free(hdr_full_cache
, markers
[i
]);
4174 kmem_free(markers
, sizeof (*markers
) * count
);
4178 * Evict buffers from the given arc state, until we've removed the
4179 * specified number of bytes. Move the removed buffers to the
4180 * appropriate evict state.
4182 * This function makes a "best effort". It skips over any buffers
4183 * it can't get a hash_lock on, and so, may not catch all candidates.
4184 * It may also return without evicting as much space as requested.
4186 * If bytes is specified using the special value ARC_EVICT_ALL, this
4187 * will evict all available (i.e. unlocked and evictable) buffers from
4188 * the given arc state; which is used by arc_flush().
4191 arc_evict_state(arc_state_t
*state
, arc_buf_contents_t type
, uint64_t spa
,
4194 uint64_t total_evicted
= 0;
4195 multilist_t
*ml
= &state
->arcs_list
[type
];
4197 arc_buf_hdr_t
**markers
;
4199 num_sublists
= multilist_get_num_sublists(ml
);
4202 * If we've tried to evict from each sublist, made some
4203 * progress, but still have not hit the target number of bytes
4204 * to evict, we want to keep trying. The markers allow us to
4205 * pick up where we left off for each individual sublist, rather
4206 * than starting from the tail each time.
4208 if (zthr_iscurthread(arc_evict_zthr
)) {
4209 markers
= arc_state_evict_markers
;
4210 ASSERT3S(num_sublists
, <=, arc_state_evict_marker_count
);
4212 markers
= arc_state_alloc_markers(num_sublists
);
4214 for (int i
= 0; i
< num_sublists
; i
++) {
4215 multilist_sublist_t
*mls
;
4217 mls
= multilist_sublist_lock(ml
, i
);
4218 multilist_sublist_insert_tail(mls
, markers
[i
]);
4219 multilist_sublist_unlock(mls
);
4223 * While we haven't hit our target number of bytes to evict, or
4224 * we're evicting all available buffers.
4226 while (total_evicted
< bytes
) {
4227 int sublist_idx
= multilist_get_random_index(ml
);
4228 uint64_t scan_evicted
= 0;
4231 * Start eviction using a randomly selected sublist,
4232 * this is to try and evenly balance eviction across all
4233 * sublists. Always starting at the same sublist
4234 * (e.g. index 0) would cause evictions to favor certain
4235 * sublists over others.
4237 for (int i
= 0; i
< num_sublists
; i
++) {
4238 uint64_t bytes_remaining
;
4239 uint64_t bytes_evicted
;
4241 if (total_evicted
< bytes
)
4242 bytes_remaining
= bytes
- total_evicted
;
4246 bytes_evicted
= arc_evict_state_impl(ml
, sublist_idx
,
4247 markers
[sublist_idx
], spa
, bytes_remaining
);
4249 scan_evicted
+= bytes_evicted
;
4250 total_evicted
+= bytes_evicted
;
4252 /* we've reached the end, wrap to the beginning */
4253 if (++sublist_idx
>= num_sublists
)
4258 * If we didn't evict anything during this scan, we have
4259 * no reason to believe we'll evict more during another
4260 * scan, so break the loop.
4262 if (scan_evicted
== 0) {
4263 /* This isn't possible, let's make that obvious */
4264 ASSERT3S(bytes
, !=, 0);
4267 * When bytes is ARC_EVICT_ALL, the only way to
4268 * break the loop is when scan_evicted is zero.
4269 * In that case, we actually have evicted enough,
4270 * so we don't want to increment the kstat.
4272 if (bytes
!= ARC_EVICT_ALL
) {
4273 ASSERT3S(total_evicted
, <, bytes
);
4274 ARCSTAT_BUMP(arcstat_evict_not_enough
);
4281 for (int i
= 0; i
< num_sublists
; i
++) {
4282 multilist_sublist_t
*mls
= multilist_sublist_lock(ml
, i
);
4283 multilist_sublist_remove(mls
, markers
[i
]);
4284 multilist_sublist_unlock(mls
);
4286 if (markers
!= arc_state_evict_markers
)
4287 arc_state_free_markers(markers
, num_sublists
);
4289 return (total_evicted
);
4293 * Flush all "evictable" data of the given type from the arc state
4294 * specified. This will not evict any "active" buffers (i.e. referenced).
4296 * When 'retry' is set to B_FALSE, the function will make a single pass
4297 * over the state and evict any buffers that it can. Since it doesn't
4298 * continually retry the eviction, it might end up leaving some buffers
4299 * in the ARC due to lock misses.
4301 * When 'retry' is set to B_TRUE, the function will continually retry the
4302 * eviction until *all* evictable buffers have been removed from the
4303 * state. As a result, if concurrent insertions into the state are
4304 * allowed (e.g. if the ARC isn't shutting down), this function might
4305 * wind up in an infinite loop, continually trying to evict buffers.
4308 arc_flush_state(arc_state_t
*state
, uint64_t spa
, arc_buf_contents_t type
,
4311 uint64_t evicted
= 0;
4313 while (zfs_refcount_count(&state
->arcs_esize
[type
]) != 0) {
4314 evicted
+= arc_evict_state(state
, type
, spa
, ARC_EVICT_ALL
);
4324 * Evict the specified number of bytes from the state specified. This
4325 * function prevents us from trying to evict more from a state's list
4326 * than is "evictable", and to skip evicting altogether when passed a
4327 * negative value for "bytes". In contrast, arc_evict_state() will
4328 * evict everything it can, when passed a negative value for "bytes".
4331 arc_evict_impl(arc_state_t
*state
, arc_buf_contents_t type
, int64_t bytes
)
4335 if (bytes
> 0 && zfs_refcount_count(&state
->arcs_esize
[type
]) > 0) {
4336 delta
= MIN(zfs_refcount_count(&state
->arcs_esize
[type
]),
4338 return (arc_evict_state(state
, type
, 0, delta
));
4345 * Adjust specified fraction, taking into account initial ghost state(s) size,
4346 * ghost hit bytes towards increasing the fraction, ghost hit bytes towards
4347 * decreasing it, plus a balance factor, controlling the decrease rate, used
4348 * to balance metadata vs data.
4351 arc_evict_adj(uint64_t frac
, uint64_t total
, uint64_t up
, uint64_t down
,
4354 if (total
< 8 || up
+ down
== 0)
4358 * We should not have more ghost hits than ghost size, but they
4359 * may get close. Restrict maximum adjustment in that case.
4361 if (up
+ down
>= total
/ 4) {
4362 uint64_t scale
= (up
+ down
) / (total
/ 8);
4367 /* Get maximal dynamic range by choosing optimal shifts. */
4368 int s
= highbit64(total
);
4369 s
= MIN(64 - s
, 32);
4371 uint64_t ofrac
= (1ULL << 32) - frac
;
4373 if (frac
>= 4 * ofrac
)
4374 up
/= frac
/ (2 * ofrac
+ 1);
4375 up
= (up
<< s
) / (total
>> (32 - s
));
4376 if (ofrac
>= 4 * frac
)
4377 down
/= ofrac
/ (2 * frac
+ 1);
4378 down
= (down
<< s
) / (total
>> (32 - s
));
4379 down
= down
* 100 / balance
;
4381 return (frac
+ up
- down
);
4385 * Evict buffers from the cache, such that arcstat_size is capped by arc_c.
4390 uint64_t asize
, bytes
, total_evicted
= 0;
4391 int64_t e
, mrud
, mrum
, mfud
, mfum
, w
;
4392 static uint64_t ogrd
, ogrm
, ogfd
, ogfm
;
4393 static uint64_t gsrd
, gsrm
, gsfd
, gsfm
;
4394 uint64_t ngrd
, ngrm
, ngfd
, ngfm
;
4396 /* Get current size of ARC states we can evict from. */
4397 mrud
= zfs_refcount_count(&arc_mru
->arcs_size
[ARC_BUFC_DATA
]) +
4398 zfs_refcount_count(&arc_anon
->arcs_size
[ARC_BUFC_DATA
]);
4399 mrum
= zfs_refcount_count(&arc_mru
->arcs_size
[ARC_BUFC_METADATA
]) +
4400 zfs_refcount_count(&arc_anon
->arcs_size
[ARC_BUFC_METADATA
]);
4401 mfud
= zfs_refcount_count(&arc_mfu
->arcs_size
[ARC_BUFC_DATA
]);
4402 mfum
= zfs_refcount_count(&arc_mfu
->arcs_size
[ARC_BUFC_METADATA
]);
4403 uint64_t d
= mrud
+ mfud
;
4404 uint64_t m
= mrum
+ mfum
;
4407 /* Get ARC ghost hits since last eviction. */
4408 ngrd
= wmsum_value(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_DATA
]);
4409 uint64_t grd
= ngrd
- ogrd
;
4411 ngrm
= wmsum_value(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_METADATA
]);
4412 uint64_t grm
= ngrm
- ogrm
;
4414 ngfd
= wmsum_value(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_DATA
]);
4415 uint64_t gfd
= ngfd
- ogfd
;
4417 ngfm
= wmsum_value(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_METADATA
]);
4418 uint64_t gfm
= ngfm
- ogfm
;
4421 /* Adjust ARC states balance based on ghost hits. */
4422 arc_meta
= arc_evict_adj(arc_meta
, gsrd
+ gsrm
+ gsfd
+ gsfm
,
4423 grm
+ gfm
, grd
+ gfd
, zfs_arc_meta_balance
);
4424 arc_pd
= arc_evict_adj(arc_pd
, gsrd
+ gsfd
, grd
, gfd
, 100);
4425 arc_pm
= arc_evict_adj(arc_pm
, gsrm
+ gsfm
, grm
, gfm
, 100);
4427 asize
= aggsum_value(&arc_sums
.arcstat_size
);
4428 int64_t wt
= t
- (asize
- arc_c
);
4431 * Try to reduce pinned dnodes if more than 3/4 of wanted metadata
4432 * target is not evictable or if they go over arc_dnode_limit.
4435 int64_t dn
= wmsum_value(&arc_sums
.arcstat_dnode_size
);
4436 w
= wt
* (int64_t)(arc_meta
>> 16) >> 16;
4437 if (zfs_refcount_count(&arc_mru
->arcs_size
[ARC_BUFC_METADATA
]) +
4438 zfs_refcount_count(&arc_mfu
->arcs_size
[ARC_BUFC_METADATA
]) -
4439 zfs_refcount_count(&arc_mru
->arcs_esize
[ARC_BUFC_METADATA
]) -
4440 zfs_refcount_count(&arc_mfu
->arcs_esize
[ARC_BUFC_METADATA
]) >
4442 prune
= dn
/ sizeof (dnode_t
) *
4443 zfs_arc_dnode_reduce_percent
/ 100;
4444 } else if (dn
> arc_dnode_limit
) {
4445 prune
= (dn
- arc_dnode_limit
) / sizeof (dnode_t
) *
4446 zfs_arc_dnode_reduce_percent
/ 100;
4449 arc_prune_async(prune
);
4451 /* Evict MRU metadata. */
4452 w
= wt
* (int64_t)(arc_meta
* arc_pm
>> 48) >> 16;
4453 e
= MIN((int64_t)(asize
- arc_c
), (int64_t)(mrum
- w
));
4454 bytes
= arc_evict_impl(arc_mru
, ARC_BUFC_METADATA
, e
);
4455 total_evicted
+= bytes
;
4459 /* Evict MFU metadata. */
4460 w
= wt
* (int64_t)(arc_meta
>> 16) >> 16;
4461 e
= MIN((int64_t)(asize
- arc_c
), (int64_t)(m
- w
));
4462 bytes
= arc_evict_impl(arc_mfu
, ARC_BUFC_METADATA
, e
);
4463 total_evicted
+= bytes
;
4467 /* Evict MRU data. */
4468 wt
-= m
- total_evicted
;
4469 w
= wt
* (int64_t)(arc_pd
>> 16) >> 16;
4470 e
= MIN((int64_t)(asize
- arc_c
), (int64_t)(mrud
- w
));
4471 bytes
= arc_evict_impl(arc_mru
, ARC_BUFC_DATA
, e
);
4472 total_evicted
+= bytes
;
4476 /* Evict MFU data. */
4478 bytes
= arc_evict_impl(arc_mfu
, ARC_BUFC_DATA
, e
);
4480 total_evicted
+= bytes
;
4485 * Size of each state's ghost list represents how much that state
4486 * may grow by shrinking the other states. Would it need to shrink
4487 * other states to zero (that is unlikely), its ghost size would be
4488 * equal to sum of other three state sizes. But excessive ghost
4489 * size may result in false ghost hits (too far back), that may
4490 * never result in real cache hits if several states are competing.
4491 * So choose some arbitraty point of 1/2 of other state sizes.
4493 gsrd
= (mrum
+ mfud
+ mfum
) / 2;
4494 e
= zfs_refcount_count(&arc_mru_ghost
->arcs_size
[ARC_BUFC_DATA
]) -
4496 (void) arc_evict_impl(arc_mru_ghost
, ARC_BUFC_DATA
, e
);
4498 gsrm
= (mrud
+ mfud
+ mfum
) / 2;
4499 e
= zfs_refcount_count(&arc_mru_ghost
->arcs_size
[ARC_BUFC_METADATA
]) -
4501 (void) arc_evict_impl(arc_mru_ghost
, ARC_BUFC_METADATA
, e
);
4503 gsfd
= (mrud
+ mrum
+ mfum
) / 2;
4504 e
= zfs_refcount_count(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_DATA
]) -
4506 (void) arc_evict_impl(arc_mfu_ghost
, ARC_BUFC_DATA
, e
);
4508 gsfm
= (mrud
+ mrum
+ mfud
) / 2;
4509 e
= zfs_refcount_count(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_METADATA
]) -
4511 (void) arc_evict_impl(arc_mfu_ghost
, ARC_BUFC_METADATA
, e
);
4513 return (total_evicted
);
4517 arc_flush(spa_t
*spa
, boolean_t retry
)
4522 * If retry is B_TRUE, a spa must not be specified since we have
4523 * no good way to determine if all of a spa's buffers have been
4524 * evicted from an arc state.
4526 ASSERT(!retry
|| spa
== NULL
);
4529 guid
= spa_load_guid(spa
);
4531 (void) arc_flush_state(arc_mru
, guid
, ARC_BUFC_DATA
, retry
);
4532 (void) arc_flush_state(arc_mru
, guid
, ARC_BUFC_METADATA
, retry
);
4534 (void) arc_flush_state(arc_mfu
, guid
, ARC_BUFC_DATA
, retry
);
4535 (void) arc_flush_state(arc_mfu
, guid
, ARC_BUFC_METADATA
, retry
);
4537 (void) arc_flush_state(arc_mru_ghost
, guid
, ARC_BUFC_DATA
, retry
);
4538 (void) arc_flush_state(arc_mru_ghost
, guid
, ARC_BUFC_METADATA
, retry
);
4540 (void) arc_flush_state(arc_mfu_ghost
, guid
, ARC_BUFC_DATA
, retry
);
4541 (void) arc_flush_state(arc_mfu_ghost
, guid
, ARC_BUFC_METADATA
, retry
);
4543 (void) arc_flush_state(arc_uncached
, guid
, ARC_BUFC_DATA
, retry
);
4544 (void) arc_flush_state(arc_uncached
, guid
, ARC_BUFC_METADATA
, retry
);
4548 arc_reduce_target_size(int64_t to_free
)
4556 * All callers want the ARC to actually evict (at least) this much
4557 * memory. Therefore we reduce from the lower of the current size and
4558 * the target size. This way, even if arc_c is much higher than
4559 * arc_size (as can be the case after many calls to arc_freed(), we will
4560 * immediately have arc_c < arc_size and therefore the arc_evict_zthr
4563 uint64_t asize
= aggsum_value(&arc_sums
.arcstat_size
);
4565 to_free
+= c
- asize
;
4566 arc_c
= MAX((int64_t)c
- to_free
, (int64_t)arc_c_min
);
4568 /* See comment in arc_evict_cb_check() on why lock+flag */
4569 mutex_enter(&arc_evict_lock
);
4570 arc_evict_needed
= B_TRUE
;
4571 mutex_exit(&arc_evict_lock
);
4572 zthr_wakeup(arc_evict_zthr
);
4576 * Determine if the system is under memory pressure and is asking
4577 * to reclaim memory. A return value of B_TRUE indicates that the system
4578 * is under memory pressure and that the arc should adjust accordingly.
4581 arc_reclaim_needed(void)
4583 return (arc_available_memory() < 0);
4587 arc_kmem_reap_soon(void)
4590 kmem_cache_t
*prev_cache
= NULL
;
4591 kmem_cache_t
*prev_data_cache
= NULL
;
4596 * Reclaim unused memory from all kmem caches.
4602 for (i
= 0; i
< SPA_MAXBLOCKSIZE
>> SPA_MINBLOCKSHIFT
; i
++) {
4604 /* reach upper limit of cache size on 32-bit */
4605 if (zio_buf_cache
[i
] == NULL
)
4608 if (zio_buf_cache
[i
] != prev_cache
) {
4609 prev_cache
= zio_buf_cache
[i
];
4610 kmem_cache_reap_now(zio_buf_cache
[i
]);
4612 if (zio_data_buf_cache
[i
] != prev_data_cache
) {
4613 prev_data_cache
= zio_data_buf_cache
[i
];
4614 kmem_cache_reap_now(zio_data_buf_cache
[i
]);
4617 kmem_cache_reap_now(buf_cache
);
4618 kmem_cache_reap_now(hdr_full_cache
);
4619 kmem_cache_reap_now(hdr_l2only_cache
);
4620 kmem_cache_reap_now(zfs_btree_leaf_cache
);
4621 abd_cache_reap_now();
4625 arc_evict_cb_check(void *arg
, zthr_t
*zthr
)
4627 (void) arg
, (void) zthr
;
4631 * This is necessary in order to keep the kstat information
4632 * up to date for tools that display kstat data such as the
4633 * mdb ::arc dcmd and the Linux crash utility. These tools
4634 * typically do not call kstat's update function, but simply
4635 * dump out stats from the most recent update. Without
4636 * this call, these commands may show stale stats for the
4637 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
4638 * with this call, the data might be out of date if the
4639 * evict thread hasn't been woken recently; but that should
4640 * suffice. The arc_state_t structures can be queried
4641 * directly if more accurate information is needed.
4643 if (arc_ksp
!= NULL
)
4644 arc_ksp
->ks_update(arc_ksp
, KSTAT_READ
);
4648 * We have to rely on arc_wait_for_eviction() to tell us when to
4649 * evict, rather than checking if we are overflowing here, so that we
4650 * are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
4651 * If we have become "not overflowing" since arc_wait_for_eviction()
4652 * checked, we need to wake it up. We could broadcast the CV here,
4653 * but arc_wait_for_eviction() may have not yet gone to sleep. We
4654 * would need to use a mutex to ensure that this function doesn't
4655 * broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
4656 * the arc_evict_lock). However, the lock ordering of such a lock
4657 * would necessarily be incorrect with respect to the zthr_lock,
4658 * which is held before this function is called, and is held by
4659 * arc_wait_for_eviction() when it calls zthr_wakeup().
4661 if (arc_evict_needed
)
4665 * If we have buffers in uncached state, evict them periodically.
4667 return ((zfs_refcount_count(&arc_uncached
->arcs_esize
[ARC_BUFC_DATA
]) +
4668 zfs_refcount_count(&arc_uncached
->arcs_esize
[ARC_BUFC_METADATA
]) &&
4669 ddi_get_lbolt() - arc_last_uncached_flush
>
4670 MSEC_TO_TICK(arc_min_prefetch_ms
/ 2)));
4674 * Keep arc_size under arc_c by running arc_evict which evicts data
4678 arc_evict_cb(void *arg
, zthr_t
*zthr
)
4680 (void) arg
, (void) zthr
;
4682 uint64_t evicted
= 0;
4683 fstrans_cookie_t cookie
= spl_fstrans_mark();
4685 /* Always try to evict from uncached state. */
4686 arc_last_uncached_flush
= ddi_get_lbolt();
4687 evicted
+= arc_flush_state(arc_uncached
, 0, ARC_BUFC_DATA
, B_FALSE
);
4688 evicted
+= arc_flush_state(arc_uncached
, 0, ARC_BUFC_METADATA
, B_FALSE
);
4690 /* Evict from other states only if told to. */
4691 if (arc_evict_needed
)
4692 evicted
+= arc_evict();
4695 * If evicted is zero, we couldn't evict anything
4696 * via arc_evict(). This could be due to hash lock
4697 * collisions, but more likely due to the majority of
4698 * arc buffers being unevictable. Therefore, even if
4699 * arc_size is above arc_c, another pass is unlikely to
4700 * be helpful and could potentially cause us to enter an
4701 * infinite loop. Additionally, zthr_iscancelled() is
4702 * checked here so that if the arc is shutting down, the
4703 * broadcast will wake any remaining arc evict waiters.
4705 mutex_enter(&arc_evict_lock
);
4706 arc_evict_needed
= !zthr_iscancelled(arc_evict_zthr
) &&
4707 evicted
> 0 && aggsum_compare(&arc_sums
.arcstat_size
, arc_c
) > 0;
4708 if (!arc_evict_needed
) {
4710 * We're either no longer overflowing, or we
4711 * can't evict anything more, so we should wake
4712 * arc_get_data_impl() sooner.
4714 arc_evict_waiter_t
*aw
;
4715 while ((aw
= list_remove_head(&arc_evict_waiters
)) != NULL
) {
4716 cv_broadcast(&aw
->aew_cv
);
4718 arc_set_need_free();
4720 mutex_exit(&arc_evict_lock
);
4721 spl_fstrans_unmark(cookie
);
4725 arc_reap_cb_check(void *arg
, zthr_t
*zthr
)
4727 (void) arg
, (void) zthr
;
4729 int64_t free_memory
= arc_available_memory();
4730 static int reap_cb_check_counter
= 0;
4733 * If a kmem reap is already active, don't schedule more. We must
4734 * check for this because kmem_cache_reap_soon() won't actually
4735 * block on the cache being reaped (this is to prevent callers from
4736 * becoming implicitly blocked by a system-wide kmem reap -- which,
4737 * on a system with many, many full magazines, can take minutes).
4739 if (!kmem_cache_reap_active() && free_memory
< 0) {
4741 arc_no_grow
= B_TRUE
;
4744 * Wait at least zfs_grow_retry (default 5) seconds
4745 * before considering growing.
4747 arc_growtime
= gethrtime() + SEC2NSEC(arc_grow_retry
);
4749 } else if (free_memory
< arc_c
>> arc_no_grow_shift
) {
4750 arc_no_grow
= B_TRUE
;
4751 } else if (gethrtime() >= arc_growtime
) {
4752 arc_no_grow
= B_FALSE
;
4756 * Called unconditionally every 60 seconds to reclaim unused
4757 * zstd compression and decompression context. This is done
4758 * here to avoid the need for an independent thread.
4760 if (!((reap_cb_check_counter
++) % 60))
4761 zfs_zstd_cache_reap_now();
4767 * Keep enough free memory in the system by reaping the ARC's kmem
4768 * caches. To cause more slabs to be reapable, we may reduce the
4769 * target size of the cache (arc_c), causing the arc_evict_cb()
4770 * to free more buffers.
4773 arc_reap_cb(void *arg
, zthr_t
*zthr
)
4775 (void) arg
, (void) zthr
;
4777 int64_t free_memory
;
4778 fstrans_cookie_t cookie
= spl_fstrans_mark();
4781 * Kick off asynchronous kmem_reap()'s of all our caches.
4783 arc_kmem_reap_soon();
4786 * Wait at least arc_kmem_cache_reap_retry_ms between
4787 * arc_kmem_reap_soon() calls. Without this check it is possible to
4788 * end up in a situation where we spend lots of time reaping
4789 * caches, while we're near arc_c_min. Waiting here also gives the
4790 * subsequent free memory check a chance of finding that the
4791 * asynchronous reap has already freed enough memory, and we don't
4792 * need to call arc_reduce_target_size().
4794 delay((hz
* arc_kmem_cache_reap_retry_ms
+ 999) / 1000);
4797 * Reduce the target size as needed to maintain the amount of free
4798 * memory in the system at a fraction of the arc_size (1/128th by
4799 * default). If oversubscribed (free_memory < 0) then reduce the
4800 * target arc_size by the deficit amount plus the fractional
4801 * amount. If free memory is positive but less than the fractional
4802 * amount, reduce by what is needed to hit the fractional amount.
4804 free_memory
= arc_available_memory();
4806 int64_t can_free
= arc_c
- arc_c_min
;
4808 int64_t to_free
= (can_free
>> arc_shrink_shift
) - free_memory
;
4810 arc_reduce_target_size(to_free
);
4812 spl_fstrans_unmark(cookie
);
4817 * Determine the amount of memory eligible for eviction contained in the
4818 * ARC. All clean data reported by the ghost lists can always be safely
4819 * evicted. Due to arc_c_min, the same does not hold for all clean data
4820 * contained by the regular mru and mfu lists.
4822 * In the case of the regular mru and mfu lists, we need to report as
4823 * much clean data as possible, such that evicting that same reported
4824 * data will not bring arc_size below arc_c_min. Thus, in certain
4825 * circumstances, the total amount of clean data in the mru and mfu
4826 * lists might not actually be evictable.
4828 * The following two distinct cases are accounted for:
4830 * 1. The sum of the amount of dirty data contained by both the mru and
4831 * mfu lists, plus the ARC's other accounting (e.g. the anon list),
4832 * is greater than or equal to arc_c_min.
4833 * (i.e. amount of dirty data >= arc_c_min)
4835 * This is the easy case; all clean data contained by the mru and mfu
4836 * lists is evictable. Evicting all clean data can only drop arc_size
4837 * to the amount of dirty data, which is greater than arc_c_min.
4839 * 2. The sum of the amount of dirty data contained by both the mru and
4840 * mfu lists, plus the ARC's other accounting (e.g. the anon list),
4841 * is less than arc_c_min.
4842 * (i.e. arc_c_min > amount of dirty data)
4844 * 2.1. arc_size is greater than or equal arc_c_min.
4845 * (i.e. arc_size >= arc_c_min > amount of dirty data)
4847 * In this case, not all clean data from the regular mru and mfu
4848 * lists is actually evictable; we must leave enough clean data
4849 * to keep arc_size above arc_c_min. Thus, the maximum amount of
4850 * evictable data from the two lists combined, is exactly the
4851 * difference between arc_size and arc_c_min.
4853 * 2.2. arc_size is less than arc_c_min
4854 * (i.e. arc_c_min > arc_size > amount of dirty data)
4856 * In this case, none of the data contained in the mru and mfu
4857 * lists is evictable, even if it's clean. Since arc_size is
4858 * already below arc_c_min, evicting any more would only
4859 * increase this negative difference.
4862 #endif /* _KERNEL */
4865 * Adapt arc info given the number of bytes we are trying to add and
4866 * the state that we are coming from. This function is only called
4867 * when we are adding new content to the cache.
4870 arc_adapt(uint64_t bytes
)
4873 * Wake reap thread if we do not have any available memory
4875 if (arc_reclaim_needed()) {
4876 zthr_wakeup(arc_reap_zthr
);
4883 if (arc_c
>= arc_c_max
)
4887 * If we're within (2 * maxblocksize) bytes of the target
4888 * cache size, increment the target cache size
4890 if (aggsum_upper_bound(&arc_sums
.arcstat_size
) +
4891 2 * SPA_MAXBLOCKSIZE
>= arc_c
) {
4892 uint64_t dc
= MAX(bytes
, SPA_OLD_MAXBLOCKSIZE
);
4893 if (atomic_add_64_nv(&arc_c
, dc
) > arc_c_max
)
4899 * Check if arc_size has grown past our upper threshold, determined by
4900 * zfs_arc_overflow_shift.
4902 static arc_ovf_level_t
4903 arc_is_overflowing(boolean_t use_reserve
)
4905 /* Always allow at least one block of overflow */
4906 int64_t overflow
= MAX(SPA_MAXBLOCKSIZE
,
4907 arc_c
>> zfs_arc_overflow_shift
);
4910 * We just compare the lower bound here for performance reasons. Our
4911 * primary goals are to make sure that the arc never grows without
4912 * bound, and that it can reach its maximum size. This check
4913 * accomplishes both goals. The maximum amount we could run over by is
4914 * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
4915 * in the ARC. In practice, that's in the tens of MB, which is low
4916 * enough to be safe.
4918 int64_t over
= aggsum_lower_bound(&arc_sums
.arcstat_size
) -
4919 arc_c
- overflow
/ 2;
4922 return (over
< 0 ? ARC_OVF_NONE
:
4923 over
< overflow
? ARC_OVF_SOME
: ARC_OVF_SEVERE
);
4927 arc_get_data_abd(arc_buf_hdr_t
*hdr
, uint64_t size
, const void *tag
,
4930 arc_buf_contents_t type
= arc_buf_type(hdr
);
4932 arc_get_data_impl(hdr
, size
, tag
, alloc_flags
);
4933 if (alloc_flags
& ARC_HDR_ALLOC_LINEAR
)
4934 return (abd_alloc_linear(size
, type
== ARC_BUFC_METADATA
));
4936 return (abd_alloc(size
, type
== ARC_BUFC_METADATA
));
4940 arc_get_data_buf(arc_buf_hdr_t
*hdr
, uint64_t size
, const void *tag
)
4942 arc_buf_contents_t type
= arc_buf_type(hdr
);
4944 arc_get_data_impl(hdr
, size
, tag
, 0);
4945 if (type
== ARC_BUFC_METADATA
) {
4946 return (zio_buf_alloc(size
));
4948 ASSERT(type
== ARC_BUFC_DATA
);
4949 return (zio_data_buf_alloc(size
));
4954 * Wait for the specified amount of data (in bytes) to be evicted from the
4955 * ARC, and for there to be sufficient free memory in the system. Waiting for
4956 * eviction ensures that the memory used by the ARC decreases. Waiting for
4957 * free memory ensures that the system won't run out of free pages, regardless
4958 * of ARC behavior and settings. See arc_lowmem_init().
4961 arc_wait_for_eviction(uint64_t amount
, boolean_t use_reserve
)
4963 switch (arc_is_overflowing(use_reserve
)) {
4968 * This is a bit racy without taking arc_evict_lock, but the
4969 * worst that can happen is we either call zthr_wakeup() extra
4970 * time due to race with other thread here, or the set flag
4971 * get cleared by arc_evict_cb(), which is unlikely due to
4972 * big hysteresis, but also not important since at this level
4973 * of overflow the eviction is purely advisory. Same time
4974 * taking the global lock here every time without waiting for
4975 * the actual eviction creates a significant lock contention.
4977 if (!arc_evict_needed
) {
4978 arc_evict_needed
= B_TRUE
;
4979 zthr_wakeup(arc_evict_zthr
);
4982 case ARC_OVF_SEVERE
:
4985 arc_evict_waiter_t aw
;
4986 list_link_init(&aw
.aew_node
);
4987 cv_init(&aw
.aew_cv
, NULL
, CV_DEFAULT
, NULL
);
4989 uint64_t last_count
= 0;
4990 mutex_enter(&arc_evict_lock
);
4991 if (!list_is_empty(&arc_evict_waiters
)) {
4992 arc_evict_waiter_t
*last
=
4993 list_tail(&arc_evict_waiters
);
4994 last_count
= last
->aew_count
;
4995 } else if (!arc_evict_needed
) {
4996 arc_evict_needed
= B_TRUE
;
4997 zthr_wakeup(arc_evict_zthr
);
5000 * Note, the last waiter's count may be less than
5001 * arc_evict_count if we are low on memory in which
5002 * case arc_evict_state_impl() may have deferred
5003 * wakeups (but still incremented arc_evict_count).
5005 aw
.aew_count
= MAX(last_count
, arc_evict_count
) + amount
;
5007 list_insert_tail(&arc_evict_waiters
, &aw
);
5009 arc_set_need_free();
5011 DTRACE_PROBE3(arc__wait__for__eviction
,
5013 uint64_t, arc_evict_count
,
5014 uint64_t, aw
.aew_count
);
5017 * We will be woken up either when arc_evict_count reaches
5018 * aew_count, or when the ARC is no longer overflowing and
5019 * eviction completes.
5020 * In case of "false" wakeup, we will still be on the list.
5023 cv_wait(&aw
.aew_cv
, &arc_evict_lock
);
5024 } while (list_link_active(&aw
.aew_node
));
5025 mutex_exit(&arc_evict_lock
);
5027 cv_destroy(&aw
.aew_cv
);
5033 * Allocate a block and return it to the caller. If we are hitting the
5034 * hard limit for the cache size, we must sleep, waiting for the eviction
5035 * thread to catch up. If we're past the target size but below the hard
5036 * limit, we'll only signal the reclaim thread and continue on.
5039 arc_get_data_impl(arc_buf_hdr_t
*hdr
, uint64_t size
, const void *tag
,
5045 * If arc_size is currently overflowing, we must be adding data
5046 * faster than we are evicting. To ensure we don't compound the
5047 * problem by adding more data and forcing arc_size to grow even
5048 * further past it's target size, we wait for the eviction thread to
5049 * make some progress. We also wait for there to be sufficient free
5050 * memory in the system, as measured by arc_free_memory().
5052 * Specifically, we wait for zfs_arc_eviction_pct percent of the
5053 * requested size to be evicted. This should be more than 100%, to
5054 * ensure that that progress is also made towards getting arc_size
5055 * under arc_c. See the comment above zfs_arc_eviction_pct.
5057 arc_wait_for_eviction(size
* zfs_arc_eviction_pct
/ 100,
5058 alloc_flags
& ARC_HDR_USE_RESERVE
);
5060 arc_buf_contents_t type
= arc_buf_type(hdr
);
5061 if (type
== ARC_BUFC_METADATA
) {
5062 arc_space_consume(size
, ARC_SPACE_META
);
5064 arc_space_consume(size
, ARC_SPACE_DATA
);
5068 * Update the state size. Note that ghost states have a
5069 * "ghost size" and so don't need to be updated.
5071 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
5072 if (!GHOST_STATE(state
)) {
5074 (void) zfs_refcount_add_many(&state
->arcs_size
[type
], size
,
5078 * If this is reached via arc_read, the link is
5079 * protected by the hash lock. If reached via
5080 * arc_buf_alloc, the header should not be accessed by
5081 * any other thread. And, if reached via arc_read_done,
5082 * the hash lock will protect it if it's found in the
5083 * hash table; otherwise no other thread should be
5084 * trying to [add|remove]_reference it.
5086 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
5087 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
5088 (void) zfs_refcount_add_many(&state
->arcs_esize
[type
],
5095 arc_free_data_abd(arc_buf_hdr_t
*hdr
, abd_t
*abd
, uint64_t size
,
5098 arc_free_data_impl(hdr
, size
, tag
);
5103 arc_free_data_buf(arc_buf_hdr_t
*hdr
, void *buf
, uint64_t size
, const void *tag
)
5105 arc_buf_contents_t type
= arc_buf_type(hdr
);
5107 arc_free_data_impl(hdr
, size
, tag
);
5108 if (type
== ARC_BUFC_METADATA
) {
5109 zio_buf_free(buf
, size
);
5111 ASSERT(type
== ARC_BUFC_DATA
);
5112 zio_data_buf_free(buf
, size
);
5117 * Free the arc data buffer.
5120 arc_free_data_impl(arc_buf_hdr_t
*hdr
, uint64_t size
, const void *tag
)
5122 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
5123 arc_buf_contents_t type
= arc_buf_type(hdr
);
5125 /* protected by hash lock, if in the hash table */
5126 if (multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
)) {
5127 ASSERT(zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
));
5128 ASSERT(state
!= arc_anon
&& state
!= arc_l2c_only
);
5130 (void) zfs_refcount_remove_many(&state
->arcs_esize
[type
],
5133 (void) zfs_refcount_remove_many(&state
->arcs_size
[type
], size
, tag
);
5135 VERIFY3U(hdr
->b_type
, ==, type
);
5136 if (type
== ARC_BUFC_METADATA
) {
5137 arc_space_return(size
, ARC_SPACE_META
);
5139 ASSERT(type
== ARC_BUFC_DATA
);
5140 arc_space_return(size
, ARC_SPACE_DATA
);
5145 * This routine is called whenever a buffer is accessed.
5148 arc_access(arc_buf_hdr_t
*hdr
, arc_flags_t arc_flags
, boolean_t hit
)
5150 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
5151 ASSERT(HDR_HAS_L1HDR(hdr
));
5154 * Update buffer prefetch status.
5156 boolean_t was_prefetch
= HDR_PREFETCH(hdr
);
5157 boolean_t now_prefetch
= arc_flags
& ARC_FLAG_PREFETCH
;
5158 if (was_prefetch
!= now_prefetch
) {
5160 ARCSTAT_CONDSTAT(hit
, demand_hit
, demand_iohit
,
5161 HDR_PRESCIENT_PREFETCH(hdr
), prescient
, predictive
,
5164 if (HDR_HAS_L2HDR(hdr
))
5165 l2arc_hdr_arcstats_decrement_state(hdr
);
5167 arc_hdr_clear_flags(hdr
,
5168 ARC_FLAG_PREFETCH
| ARC_FLAG_PRESCIENT_PREFETCH
);
5170 arc_hdr_set_flags(hdr
, ARC_FLAG_PREFETCH
);
5172 if (HDR_HAS_L2HDR(hdr
))
5173 l2arc_hdr_arcstats_increment_state(hdr
);
5176 if (arc_flags
& ARC_FLAG_PRESCIENT_PREFETCH
) {
5177 arc_hdr_set_flags(hdr
, ARC_FLAG_PRESCIENT_PREFETCH
);
5178 ARCSTAT_BUMP(arcstat_prescient_prefetch
);
5180 ARCSTAT_BUMP(arcstat_predictive_prefetch
);
5183 if (arc_flags
& ARC_FLAG_L2CACHE
)
5184 arc_hdr_set_flags(hdr
, ARC_FLAG_L2CACHE
);
5186 clock_t now
= ddi_get_lbolt();
5187 if (hdr
->b_l1hdr
.b_state
== arc_anon
) {
5188 arc_state_t
*new_state
;
5190 * This buffer is not in the cache, and does not appear in
5191 * our "ghost" lists. Add it to the MRU or uncached state.
5193 ASSERT0(hdr
->b_l1hdr
.b_arc_access
);
5194 hdr
->b_l1hdr
.b_arc_access
= now
;
5195 if (HDR_UNCACHED(hdr
)) {
5196 new_state
= arc_uncached
;
5197 DTRACE_PROBE1(new_state__uncached
, arc_buf_hdr_t
*,
5200 new_state
= arc_mru
;
5201 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, hdr
);
5203 arc_change_state(new_state
, hdr
);
5204 } else if (hdr
->b_l1hdr
.b_state
== arc_mru
) {
5206 * This buffer has been accessed once recently and either
5207 * its read is still in progress or it is in the cache.
5209 if (HDR_IO_IN_PROGRESS(hdr
)) {
5210 hdr
->b_l1hdr
.b_arc_access
= now
;
5213 hdr
->b_l1hdr
.b_mru_hits
++;
5214 ARCSTAT_BUMP(arcstat_mru_hits
);
5217 * If the previous access was a prefetch, then it already
5218 * handled possible promotion, so nothing more to do for now.
5221 hdr
->b_l1hdr
.b_arc_access
= now
;
5226 * If more than ARC_MINTIME have passed from the previous
5227 * hit, promote the buffer to the MFU state.
5229 if (ddi_time_after(now
, hdr
->b_l1hdr
.b_arc_access
+
5231 hdr
->b_l1hdr
.b_arc_access
= now
;
5232 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
5233 arc_change_state(arc_mfu
, hdr
);
5235 } else if (hdr
->b_l1hdr
.b_state
== arc_mru_ghost
) {
5236 arc_state_t
*new_state
;
5238 * This buffer has been accessed once recently, but was
5239 * evicted from the cache. Would we have bigger MRU, it
5240 * would be an MRU hit, so handle it the same way, except
5241 * we don't need to check the previous access time.
5243 hdr
->b_l1hdr
.b_mru_ghost_hits
++;
5244 ARCSTAT_BUMP(arcstat_mru_ghost_hits
);
5245 hdr
->b_l1hdr
.b_arc_access
= now
;
5246 wmsum_add(&arc_mru_ghost
->arcs_hits
[arc_buf_type(hdr
)],
5249 new_state
= arc_mru
;
5250 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, hdr
);
5252 new_state
= arc_mfu
;
5253 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
5255 arc_change_state(new_state
, hdr
);
5256 } else if (hdr
->b_l1hdr
.b_state
== arc_mfu
) {
5258 * This buffer has been accessed more than once and either
5259 * still in the cache or being restored from one of ghosts.
5261 if (!HDR_IO_IN_PROGRESS(hdr
)) {
5262 hdr
->b_l1hdr
.b_mfu_hits
++;
5263 ARCSTAT_BUMP(arcstat_mfu_hits
);
5265 hdr
->b_l1hdr
.b_arc_access
= now
;
5266 } else if (hdr
->b_l1hdr
.b_state
== arc_mfu_ghost
) {
5268 * This buffer has been accessed more than once recently, but
5269 * has been evicted from the cache. Would we have bigger MFU
5270 * it would stay in cache, so move it back to MFU state.
5272 hdr
->b_l1hdr
.b_mfu_ghost_hits
++;
5273 ARCSTAT_BUMP(arcstat_mfu_ghost_hits
);
5274 hdr
->b_l1hdr
.b_arc_access
= now
;
5275 wmsum_add(&arc_mfu_ghost
->arcs_hits
[arc_buf_type(hdr
)],
5277 DTRACE_PROBE1(new_state__mfu
, arc_buf_hdr_t
*, hdr
);
5278 arc_change_state(arc_mfu
, hdr
);
5279 } else if (hdr
->b_l1hdr
.b_state
== arc_uncached
) {
5281 * This buffer is uncacheable, but we got a hit. Probably
5282 * a demand read after prefetch. Nothing more to do here.
5284 if (!HDR_IO_IN_PROGRESS(hdr
))
5285 ARCSTAT_BUMP(arcstat_uncached_hits
);
5286 hdr
->b_l1hdr
.b_arc_access
= now
;
5287 } else if (hdr
->b_l1hdr
.b_state
== arc_l2c_only
) {
5289 * This buffer is on the 2nd Level ARC and was not accessed
5290 * for a long time, so treat it as new and put into MRU.
5292 hdr
->b_l1hdr
.b_arc_access
= now
;
5293 DTRACE_PROBE1(new_state__mru
, arc_buf_hdr_t
*, hdr
);
5294 arc_change_state(arc_mru
, hdr
);
5296 cmn_err(CE_PANIC
, "invalid arc state 0x%p",
5297 hdr
->b_l1hdr
.b_state
);
5302 * This routine is called by dbuf_hold() to update the arc_access() state
5303 * which otherwise would be skipped for entries in the dbuf cache.
5306 arc_buf_access(arc_buf_t
*buf
)
5308 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
5311 * Avoid taking the hash_lock when possible as an optimization.
5312 * The header must be checked again under the hash_lock in order
5313 * to handle the case where it is concurrently being released.
5315 if (hdr
->b_l1hdr
.b_state
== arc_anon
|| HDR_EMPTY(hdr
))
5318 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
5319 mutex_enter(hash_lock
);
5321 if (hdr
->b_l1hdr
.b_state
== arc_anon
|| HDR_EMPTY(hdr
)) {
5322 mutex_exit(hash_lock
);
5323 ARCSTAT_BUMP(arcstat_access_skip
);
5327 ASSERT(hdr
->b_l1hdr
.b_state
== arc_mru
||
5328 hdr
->b_l1hdr
.b_state
== arc_mfu
||
5329 hdr
->b_l1hdr
.b_state
== arc_uncached
);
5331 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
5332 arc_access(hdr
, 0, B_TRUE
);
5333 mutex_exit(hash_lock
);
5335 ARCSTAT_BUMP(arcstat_hits
);
5336 ARCSTAT_CONDSTAT(B_TRUE
/* demand */, demand
, prefetch
,
5337 !HDR_ISTYPE_METADATA(hdr
), data
, metadata
, hits
);
5340 /* a generic arc_read_done_func_t which you can use */
5342 arc_bcopy_func(zio_t
*zio
, const zbookmark_phys_t
*zb
, const blkptr_t
*bp
,
5343 arc_buf_t
*buf
, void *arg
)
5345 (void) zio
, (void) zb
, (void) bp
;
5350 memcpy(arg
, buf
->b_data
, arc_buf_size(buf
));
5351 arc_buf_destroy(buf
, arg
);
5354 /* a generic arc_read_done_func_t */
5356 arc_getbuf_func(zio_t
*zio
, const zbookmark_phys_t
*zb
, const blkptr_t
*bp
,
5357 arc_buf_t
*buf
, void *arg
)
5359 (void) zb
, (void) bp
;
5360 arc_buf_t
**bufp
= arg
;
5363 ASSERT(zio
== NULL
|| zio
->io_error
!= 0);
5366 ASSERT(zio
== NULL
|| zio
->io_error
== 0);
5368 ASSERT(buf
->b_data
!= NULL
);
5373 arc_hdr_verify(arc_buf_hdr_t
*hdr
, blkptr_t
*bp
)
5375 if (BP_IS_HOLE(bp
) || BP_IS_EMBEDDED(bp
)) {
5376 ASSERT3U(HDR_GET_PSIZE(hdr
), ==, 0);
5377 ASSERT3U(arc_hdr_get_compress(hdr
), ==, ZIO_COMPRESS_OFF
);
5379 if (HDR_COMPRESSION_ENABLED(hdr
)) {
5380 ASSERT3U(arc_hdr_get_compress(hdr
), ==,
5381 BP_GET_COMPRESS(bp
));
5383 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, BP_GET_LSIZE(bp
));
5384 ASSERT3U(HDR_GET_PSIZE(hdr
), ==, BP_GET_PSIZE(bp
));
5385 ASSERT3U(!!HDR_PROTECTED(hdr
), ==, BP_IS_PROTECTED(bp
));
5390 arc_read_done(zio_t
*zio
)
5392 blkptr_t
*bp
= zio
->io_bp
;
5393 arc_buf_hdr_t
*hdr
= zio
->io_private
;
5394 kmutex_t
*hash_lock
= NULL
;
5395 arc_callback_t
*callback_list
;
5396 arc_callback_t
*acb
;
5399 * The hdr was inserted into hash-table and removed from lists
5400 * prior to starting I/O. We should find this header, since
5401 * it's in the hash table, and it should be legit since it's
5402 * not possible to evict it during the I/O. The only possible
5403 * reason for it not to be found is if we were freed during the
5406 if (HDR_IN_HASH_TABLE(hdr
)) {
5407 arc_buf_hdr_t
*found
;
5409 ASSERT3U(hdr
->b_birth
, ==, BP_PHYSICAL_BIRTH(zio
->io_bp
));
5410 ASSERT3U(hdr
->b_dva
.dva_word
[0], ==,
5411 BP_IDENTITY(zio
->io_bp
)->dva_word
[0]);
5412 ASSERT3U(hdr
->b_dva
.dva_word
[1], ==,
5413 BP_IDENTITY(zio
->io_bp
)->dva_word
[1]);
5415 found
= buf_hash_find(hdr
->b_spa
, zio
->io_bp
, &hash_lock
);
5417 ASSERT((found
== hdr
&&
5418 DVA_EQUAL(&hdr
->b_dva
, BP_IDENTITY(zio
->io_bp
))) ||
5419 (found
== hdr
&& HDR_L2_READING(hdr
)));
5420 ASSERT3P(hash_lock
, !=, NULL
);
5423 if (BP_IS_PROTECTED(bp
)) {
5424 hdr
->b_crypt_hdr
.b_ot
= BP_GET_TYPE(bp
);
5425 hdr
->b_crypt_hdr
.b_dsobj
= zio
->io_bookmark
.zb_objset
;
5426 zio_crypt_decode_params_bp(bp
, hdr
->b_crypt_hdr
.b_salt
,
5427 hdr
->b_crypt_hdr
.b_iv
);
5429 if (zio
->io_error
== 0) {
5430 if (BP_GET_TYPE(bp
) == DMU_OT_INTENT_LOG
) {
5433 tmpbuf
= abd_borrow_buf_copy(zio
->io_abd
,
5434 sizeof (zil_chain_t
));
5435 zio_crypt_decode_mac_zil(tmpbuf
,
5436 hdr
->b_crypt_hdr
.b_mac
);
5437 abd_return_buf(zio
->io_abd
, tmpbuf
,
5438 sizeof (zil_chain_t
));
5440 zio_crypt_decode_mac_bp(bp
,
5441 hdr
->b_crypt_hdr
.b_mac
);
	if (zio->io_error == 0) {
		/* byteswap if necessary */
		if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
			if (BP_GET_LEVEL(zio->io_bp) > 0) {
				hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
			} else {
				hdr->b_l1hdr.b_byteswap =
				    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
			}
		} else {
			hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
		}
		if (!HDR_L2_READING(hdr)) {
			hdr->b_complevel = zio->io_prop.zp_complevel;
		}
	}

	arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
	if (l2arc_noprefetch && HDR_PREFETCH(hdr))
		arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);

	callback_list = hdr->b_l1hdr.b_acb;
	ASSERT3P(callback_list, !=, NULL);
	hdr->b_l1hdr.b_acb = NULL;
	/*
	 * If a read request has a callback (i.e. acb_done is not NULL), then
	 * we make a buf containing the data according to the parameters which
	 * were passed in.  The implementation of arc_buf_alloc_impl() ensures
	 * that we aren't needlessly decompressing the data multiple times.
	 */
	int callback_cnt = 0;
	for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
		/* We need the last one to call below in original order. */
		callback_list = acb;

		if (!acb->acb_done || acb->acb_nobuf)
			continue;

		callback_cnt++;

		if (zio->io_error != 0)
			continue;

		int error = arc_buf_alloc_impl(hdr, zio->io_spa,
		    &acb->acb_zb, acb->acb_private, acb->acb_encrypted,
		    acb->acb_compressed, acb->acb_noauth, B_TRUE,
		    &acb->acb_buf);

		/*
		 * Assert non-speculative zios didn't fail because an
		 * encryption key wasn't loaded
		 */
		ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
		    error != EACCES);

		/*
		 * If we failed to decrypt, report an error now (as the zio
		 * layer would have done if it had done the transforms).
		 */
		if (error == ECKSUM) {
			ASSERT(BP_IS_PROTECTED(bp));
			error = SET_ERROR(EIO);
			if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
				spa_log_error(zio->io_spa, &acb->acb_zb,
				    &zio->io_bp->blk_birth);
				(void) zfs_ereport_post(
				    FM_EREPORT_ZFS_AUTHENTICATION,
				    zio->io_spa, NULL, &acb->acb_zb, zio, 0);
			}
		}

		if (error != 0) {
			/*
			 * Decompression or decryption failed.  Set
			 * io_error so that when we call acb_done
			 * (below), we will indicate that the read
			 * failed.  Note that in the unusual case
			 * where one callback is compressed and another
			 * uncompressed, we will mark all of them
			 * as failed, even though the uncompressed
			 * one can't actually fail.  In this case,
			 * the hdr will not be anonymous, because
			 * if there are multiple callbacks, it's
			 * because multiple threads found the same
			 * arc buf in the hash table.
			 */
			zio->io_error = error;
		}
	}
	/*
	 * If there are multiple callbacks, we must have the hash lock,
	 * because the only way for multiple threads to find this hdr is
	 * in the hash table.  This ensures that if there are multiple
	 * callbacks, the hdr is not anonymous.  If it were anonymous,
	 * we couldn't use arc_buf_destroy() in the error case below.
	 */
	ASSERT(callback_cnt < 2 || hash_lock != NULL);

	if (zio->io_error == 0) {
		arc_hdr_verify(hdr, zio->io_bp);
	} else {
		arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
		if (hdr->b_l1hdr.b_state != arc_anon)
			arc_change_state(arc_anon, hdr);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
	}

	arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
	(void) remove_reference(hdr, hdr);

	if (hash_lock != NULL)
		mutex_exit(hash_lock);
	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done != NULL) {
			if (zio->io_error != 0 && acb->acb_buf != NULL) {
				/*
				 * If arc_buf_alloc_impl() fails during
				 * decompression, the buf will still be
				 * allocated, and needs to be freed here.
				 */
				arc_buf_destroy(acb->acb_buf,
				    acb->acb_private);
				acb->acb_buf = NULL;
			}
			acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
			    acb->acb_buf, acb->acb_private);
		}

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_prev;
		if (acb->acb_wait) {
			mutex_enter(&acb->acb_wait_lock);
			acb->acb_wait_error = zio->io_error;
			acb->acb_wait = B_FALSE;
			cv_signal(&acb->acb_wait_cv);
			mutex_exit(&acb->acb_wait_lock);
			/* acb will be freed by the waiting thread. */
		} else {
			kmem_free(acb, sizeof (arc_callback_t));
		}
	}
}
5600 * "Read" the block at the specified DVA (in bp) via the
5601 * cache. If the block is found in the cache, invoke the provided
5602 * callback immediately and return. Note that the `zio' parameter
5603 * in the callback will be NULL in this case, since no IO was
5604 * required. If the block is not in the cache pass the read request
5605 * on to the spa with a substitute callback function, so that the
5606 * requested block will be added to the cache.
5608 * If a read request arrives for a block that has a read in-progress,
5609 * either wait for the in-progress read to complete (and return the
5610 * results); or, if this is a read with a "done" func, add a record
5611 * to the read to invoke the "done" func when the read completes,
5612 * and return; or just return.
5614 * arc_read_done() will invoke all the requested "done" functions
5615 * for readers of this block.
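/*
 * Callers choose blocking behaviour through *arc_flags: ARC_FLAG_WAIT
 * makes arc_read() block until the read completes and return its error
 * directly, while ARC_FLAG_NOWAIT returns immediately and delivers the
 * result through the "done" callback (and, when a parent zio was
 * supplied, through that zio's io_error).
 */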
5618 arc_read(zio_t
*pio
, spa_t
*spa
, const blkptr_t
*bp
,
5619 arc_read_done_func_t
*done
, void *private, zio_priority_t priority
,
5620 int zio_flags
, arc_flags_t
*arc_flags
, const zbookmark_phys_t
*zb
)
5622 arc_buf_hdr_t
*hdr
= NULL
;
5623 kmutex_t
*hash_lock
= NULL
;
5625 uint64_t guid
= spa_load_guid(spa
);
5626 boolean_t compressed_read
= (zio_flags
& ZIO_FLAG_RAW_COMPRESS
) != 0;
5627 boolean_t encrypted_read
= BP_IS_ENCRYPTED(bp
) &&
5628 (zio_flags
& ZIO_FLAG_RAW_ENCRYPT
) != 0;
5629 boolean_t noauth_read
= BP_IS_AUTHENTICATED(bp
) &&
5630 (zio_flags
& ZIO_FLAG_RAW_ENCRYPT
) != 0;
5631 boolean_t embedded_bp
= !!BP_IS_EMBEDDED(bp
);
5632 boolean_t no_buf
= *arc_flags
& ARC_FLAG_NO_BUF
;
5633 arc_buf_t
*buf
= NULL
;
5636 ASSERT(!embedded_bp
||
5637 BPE_GET_ETYPE(bp
) == BP_EMBEDDED_TYPE_DATA
);
5638 ASSERT(!BP_IS_HOLE(bp
));
5639 ASSERT(!BP_IS_REDACTED(bp
));
5642 * Normally SPL_FSTRANS will already be set since kernel threads which
5643 * expect to call the DMU interfaces will set it when created. System
5644 * calls are similarly handled by setting/cleaning the bit in the
5645 * registered callback (module/os/.../zfs/zpl_*).
5647 * External consumers such as Lustre which call the exported DMU
5648 * interfaces may not have set SPL_FSTRANS. To avoid a deadlock
5649 * on the hash_lock always set and clear the bit.
5651 fstrans_cookie_t cookie
= spl_fstrans_mark();
5654 * Verify the block pointer contents are reasonable. This should
5655 * always be the case since the blkptr is protected by a checksum.
5656 * However, if there is damage it's desirable to detect this early
5657 * and treat it as a checksum error. This allows an alternate blkptr
5658 * to be tried when one is available (e.g. ditto blocks).
5660 if (!zfs_blkptr_verify(spa
, bp
, (zio_flags
& ZIO_FLAG_CONFIG_WRITER
) ?
5661 BLK_CONFIG_HELD
: BLK_CONFIG_NEEDED
, BLK_VERIFY_LOG
)) {
5662 rc
= SET_ERROR(ECKSUM
);
5668 * Embedded BP's have no DVA and require no I/O to "read".
5669 * Create an anonymous arc buf to back it.
5671 hdr
= buf_hash_find(guid
, bp
, &hash_lock
);
5675 * Determine if we have an L1 cache hit or a cache miss. For simplicity
5676 * we maintain encrypted data separately from compressed / uncompressed
5677 * data. If the user is requesting raw encrypted data and we don't have
5678 * that in the header we will read from disk to guarantee that we can
5679 * get it even if the encryption keys aren't loaded.
5681 if (hdr
!= NULL
&& HDR_HAS_L1HDR(hdr
) && (HDR_HAS_RABD(hdr
) ||
5682 (hdr
->b_l1hdr
.b_pabd
!= NULL
&& !encrypted_read
))) {
5683 boolean_t is_data
= !HDR_ISTYPE_METADATA(hdr
);
5685 if (HDR_IO_IN_PROGRESS(hdr
)) {
5686 if (*arc_flags
& ARC_FLAG_CACHED_ONLY
) {
5687 mutex_exit(hash_lock
);
5688 ARCSTAT_BUMP(arcstat_cached_only_in_progress
);
5689 rc
= SET_ERROR(ENOENT
);
5693 zio_t
*head_zio
= hdr
->b_l1hdr
.b_acb
->acb_zio_head
;
5694 ASSERT3P(head_zio
, !=, NULL
);
5695 if ((hdr
->b_flags
& ARC_FLAG_PRIO_ASYNC_READ
) &&
5696 priority
== ZIO_PRIORITY_SYNC_READ
) {
5698 * This is a sync read that needs to wait for
5699 * an in-flight async read. Request that the
5700 * zio have its priority upgraded.
5702 zio_change_priority(head_zio
, priority
);
5703 DTRACE_PROBE1(arc__async__upgrade__sync
,
5704 arc_buf_hdr_t
*, hdr
);
5705 ARCSTAT_BUMP(arcstat_async_upgrade_sync
);
5708 DTRACE_PROBE1(arc__iohit
, arc_buf_hdr_t
*, hdr
);
5709 arc_access(hdr
, *arc_flags
, B_FALSE
);
5712 * If there are multiple threads reading the same block
5713 * and that block is not yet in the ARC, then only one
5714 * thread will do the physical I/O and all other
5715 * threads will wait until that I/O completes.
5716 * Synchronous reads use the acb_wait_cv whereas nowait
5717 * reads register a callback. Both are signalled/called
5720 * Errors of the physical I/O may need to be propagated.
5721 * Synchronous read errors are returned here from
5722 * arc_read_done via acb_wait_error. Nowait reads
5723 * attach the acb_zio_dummy zio to pio and
5724 * arc_read_done propagates the physical I/O's io_error
5725 * to acb_zio_dummy, and thereby to pio.
5727 arc_callback_t
*acb
= NULL
;
5728 if (done
|| pio
|| *arc_flags
& ARC_FLAG_WAIT
) {
5729 acb
= kmem_zalloc(sizeof (arc_callback_t
),
5731 acb
->acb_done
= done
;
5732 acb
->acb_private
= private;
5733 acb
->acb_compressed
= compressed_read
;
5734 acb
->acb_encrypted
= encrypted_read
;
5735 acb
->acb_noauth
= noauth_read
;
5736 acb
->acb_nobuf
= no_buf
;
5737 if (*arc_flags
& ARC_FLAG_WAIT
) {
5738 acb
->acb_wait
= B_TRUE
;
5739 mutex_init(&acb
->acb_wait_lock
, NULL
,
5740 MUTEX_DEFAULT
, NULL
);
5741 cv_init(&acb
->acb_wait_cv
, NULL
,
5746 acb
->acb_zio_dummy
= zio_null(pio
,
5747 spa
, NULL
, NULL
, NULL
, zio_flags
);
5749 acb
->acb_zio_head
= head_zio
;
5750 acb
->acb_next
= hdr
->b_l1hdr
.b_acb
;
5751 hdr
->b_l1hdr
.b_acb
->acb_prev
= acb
;
5752 hdr
->b_l1hdr
.b_acb
= acb
;
5754 mutex_exit(hash_lock
);
5756 ARCSTAT_BUMP(arcstat_iohits
);
5757 ARCSTAT_CONDSTAT(!(*arc_flags
& ARC_FLAG_PREFETCH
),
5758 demand
, prefetch
, is_data
, data
, metadata
, iohits
);
5760 if (*arc_flags
& ARC_FLAG_WAIT
) {
5761 mutex_enter(&acb
->acb_wait_lock
);
5762 while (acb
->acb_wait
) {
5763 cv_wait(&acb
->acb_wait_cv
,
5764 &acb
->acb_wait_lock
);
5766 rc
= acb
->acb_wait_error
;
5767 mutex_exit(&acb
->acb_wait_lock
);
5768 mutex_destroy(&acb
->acb_wait_lock
);
5769 cv_destroy(&acb
->acb_wait_cv
);
5770 kmem_free(acb
, sizeof (arc_callback_t
));
5775 ASSERT(hdr
->b_l1hdr
.b_state
== arc_mru
||
5776 hdr
->b_l1hdr
.b_state
== arc_mfu
||
5777 hdr
->b_l1hdr
.b_state
== arc_uncached
);
5779 DTRACE_PROBE1(arc__hit
, arc_buf_hdr_t
*, hdr
);
5780 arc_access(hdr
, *arc_flags
, B_TRUE
);
5782 if (done
&& !no_buf
) {
5783 ASSERT(!embedded_bp
|| !BP_IS_HOLE(bp
));
5785 /* Get a buf with the desired data in it. */
5786 rc
= arc_buf_alloc_impl(hdr
, spa
, zb
, private,
5787 encrypted_read
, compressed_read
, noauth_read
,
5791 * Convert authentication and decryption errors
5792 * to EIO (and generate an ereport if needed)
5793 * before leaving the ARC.
5795 rc
= SET_ERROR(EIO
);
5796 if ((zio_flags
& ZIO_FLAG_SPECULATIVE
) == 0) {
5797 spa_log_error(spa
, zb
, &hdr
->b_birth
);
5798 (void) zfs_ereport_post(
5799 FM_EREPORT_ZFS_AUTHENTICATION
,
5800 spa
, NULL
, zb
, NULL
, 0);
5804 arc_buf_destroy_impl(buf
);
5806 (void) remove_reference(hdr
, private);
5809 /* assert any errors weren't due to unloaded keys */
5810 ASSERT((zio_flags
& ZIO_FLAG_SPECULATIVE
) ||
5813 mutex_exit(hash_lock
);
5814 ARCSTAT_BUMP(arcstat_hits
);
5815 ARCSTAT_CONDSTAT(!(*arc_flags
& ARC_FLAG_PREFETCH
),
5816 demand
, prefetch
, is_data
, data
, metadata
, hits
);
5817 *arc_flags
|= ARC_FLAG_CACHED
;
5820 uint64_t lsize
= BP_GET_LSIZE(bp
);
5821 uint64_t psize
= BP_GET_PSIZE(bp
);
5822 arc_callback_t
*acb
;
5825 boolean_t devw
= B_FALSE
;
5828 int alloc_flags
= encrypted_read
? ARC_HDR_ALLOC_RDATA
: 0;
5829 arc_buf_contents_t type
= BP_GET_BUFC_TYPE(bp
);
5831 if (*arc_flags
& ARC_FLAG_CACHED_ONLY
) {
5832 if (hash_lock
!= NULL
)
5833 mutex_exit(hash_lock
);
5834 rc
= SET_ERROR(ENOENT
);
5840 * This block is not in the cache or it has
5843 arc_buf_hdr_t
*exists
= NULL
;
5844 hdr
= arc_hdr_alloc(spa_load_guid(spa
), psize
, lsize
,
5845 BP_IS_PROTECTED(bp
), BP_GET_COMPRESS(bp
), 0, type
);
5848 hdr
->b_dva
= *BP_IDENTITY(bp
);
5849 hdr
->b_birth
= BP_PHYSICAL_BIRTH(bp
);
5850 exists
= buf_hash_insert(hdr
, &hash_lock
);
5852 if (exists
!= NULL
) {
5853 /* somebody beat us to the hash insert */
5854 mutex_exit(hash_lock
);
5855 buf_discard_identity(hdr
);
5856 arc_hdr_destroy(hdr
);
5857 goto top
; /* restart the IO request */
5861 * This block is in the ghost cache or encrypted data
5862 * was requested and we didn't have it. If it was
5863 * L2-only (and thus didn't have an L1 hdr),
5864 * we realloc the header to add an L1 hdr.
5866 if (!HDR_HAS_L1HDR(hdr
)) {
5867 hdr
= arc_hdr_realloc(hdr
, hdr_l2only_cache
,
5871 if (GHOST_STATE(hdr
->b_l1hdr
.b_state
)) {
5872 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
5873 ASSERT(!HDR_HAS_RABD(hdr
));
5874 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
5875 ASSERT0(zfs_refcount_count(
5876 &hdr
->b_l1hdr
.b_refcnt
));
5877 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, NULL
);
5879 ASSERT3P(hdr
->b_l1hdr
.b_freeze_cksum
, ==, NULL
);
5881 } else if (HDR_IO_IN_PROGRESS(hdr
)) {
5883 * If this header already had an IO in progress
5884 * and we are performing another IO to fetch
5885 * encrypted data we must wait until the first
5886 * IO completes so as not to confuse
5887 * arc_read_done(). This should be very rare
5888 * and so the performance impact shouldn't
5891 arc_callback_t
*acb
= kmem_zalloc(
5892 sizeof (arc_callback_t
), KM_SLEEP
);
5893 acb
->acb_wait
= B_TRUE
;
5894 mutex_init(&acb
->acb_wait_lock
, NULL
,
5895 MUTEX_DEFAULT
, NULL
);
5896 cv_init(&acb
->acb_wait_cv
, NULL
, CV_DEFAULT
,
5899 hdr
->b_l1hdr
.b_acb
->acb_zio_head
;
5900 acb
->acb_next
= hdr
->b_l1hdr
.b_acb
;
5901 hdr
->b_l1hdr
.b_acb
->acb_prev
= acb
;
5902 hdr
->b_l1hdr
.b_acb
= acb
;
5903 mutex_exit(hash_lock
);
5904 mutex_enter(&acb
->acb_wait_lock
);
5905 while (acb
->acb_wait
) {
5906 cv_wait(&acb
->acb_wait_cv
,
5907 &acb
->acb_wait_lock
);
5909 mutex_exit(&acb
->acb_wait_lock
);
5910 mutex_destroy(&acb
->acb_wait_lock
);
5911 cv_destroy(&acb
->acb_wait_cv
);
5912 kmem_free(acb
, sizeof (arc_callback_t
));
5916 if (*arc_flags
& ARC_FLAG_UNCACHED
) {
5917 arc_hdr_set_flags(hdr
, ARC_FLAG_UNCACHED
);
5918 if (!encrypted_read
)
5919 alloc_flags
|= ARC_HDR_ALLOC_LINEAR
;
5923 * Take additional reference for IO_IN_PROGRESS. It stops
5924 * arc_access() from putting this header without any buffers
5925 * and so other references but obviously nonevictable onto
5926 * the evictable list of MRU or MFU state.
5928 add_reference(hdr
, hdr
);
5930 arc_access(hdr
, *arc_flags
, B_FALSE
);
5931 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
5932 arc_hdr_alloc_abd(hdr
, alloc_flags
);
5933 if (encrypted_read
) {
5934 ASSERT(HDR_HAS_RABD(hdr
));
5935 size
= HDR_GET_PSIZE(hdr
);
5936 hdr_abd
= hdr
->b_crypt_hdr
.b_rabd
;
5937 zio_flags
|= ZIO_FLAG_RAW
;
5939 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
5940 size
= arc_hdr_size(hdr
);
5941 hdr_abd
= hdr
->b_l1hdr
.b_pabd
;
5943 if (arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
) {
5944 zio_flags
|= ZIO_FLAG_RAW_COMPRESS
;
5948 * For authenticated bp's, we do not ask the ZIO layer
5949 * to authenticate them since this will cause the entire
5950 * IO to fail if the key isn't loaded. Instead, we
5951 * defer authentication until arc_buf_fill(), which will
5952 * verify the data when the key is available.
5954 if (BP_IS_AUTHENTICATED(bp
))
5955 zio_flags
|= ZIO_FLAG_RAW_ENCRYPT
;
5958 if (BP_IS_AUTHENTICATED(bp
))
5959 arc_hdr_set_flags(hdr
, ARC_FLAG_NOAUTH
);
5960 if (BP_GET_LEVEL(bp
) > 0)
5961 arc_hdr_set_flags(hdr
, ARC_FLAG_INDIRECT
);
5962 ASSERT(!GHOST_STATE(hdr
->b_l1hdr
.b_state
));
5964 acb
= kmem_zalloc(sizeof (arc_callback_t
), KM_SLEEP
);
5965 acb
->acb_done
= done
;
5966 acb
->acb_private
= private;
5967 acb
->acb_compressed
= compressed_read
;
5968 acb
->acb_encrypted
= encrypted_read
;
5969 acb
->acb_noauth
= noauth_read
;
5972 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
5973 hdr
->b_l1hdr
.b_acb
= acb
;
5975 if (HDR_HAS_L2HDR(hdr
) &&
5976 (vd
= hdr
->b_l2hdr
.b_dev
->l2ad_vdev
) != NULL
) {
5977 devw
= hdr
->b_l2hdr
.b_dev
->l2ad_writing
;
5978 addr
= hdr
->b_l2hdr
.b_daddr
;
5980 * Lock out L2ARC device removal.
5982 if (vdev_is_dead(vd
) ||
5983 !spa_config_tryenter(spa
, SCL_L2ARC
, vd
, RW_READER
))
5988 * We count both async reads and scrub IOs as asynchronous so
5989 * that both can be upgraded in the event of a cache hit while
5990 * the read IO is still in-flight.
5992 if (priority
== ZIO_PRIORITY_ASYNC_READ
||
5993 priority
== ZIO_PRIORITY_SCRUB
)
5994 arc_hdr_set_flags(hdr
, ARC_FLAG_PRIO_ASYNC_READ
);
5996 arc_hdr_clear_flags(hdr
, ARC_FLAG_PRIO_ASYNC_READ
);
5999 * At this point, we have a level 1 cache miss or a blkptr
6000 * with embedded data. Try again in L2ARC if possible.
6002 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, lsize
);
6005 * Skip ARC stat bump for block pointers with embedded
6006 * data. The data are read from the blkptr itself via
6007 * decode_embedded_bp_compressed().
6010 DTRACE_PROBE4(arc__miss
, arc_buf_hdr_t
*, hdr
,
6011 blkptr_t
*, bp
, uint64_t, lsize
,
6012 zbookmark_phys_t
*, zb
);
6013 ARCSTAT_BUMP(arcstat_misses
);
6014 ARCSTAT_CONDSTAT(!(*arc_flags
& ARC_FLAG_PREFETCH
),
6015 demand
, prefetch
, !HDR_ISTYPE_METADATA(hdr
), data
,
6017 zfs_racct_read(size
, 1);
6020 /* Check if the spa even has l2 configured */
6021 const boolean_t spa_has_l2
= l2arc_ndev
!= 0 &&
6022 spa
->spa_l2cache
.sav_count
> 0;
6024 if (vd
!= NULL
&& spa_has_l2
&& !(l2arc_norw
&& devw
)) {
6026 * Read from the L2ARC if the following are true:
6027 * 1. The L2ARC vdev was previously cached.
6028 * 2. This buffer still has L2ARC metadata.
6029 * 3. This buffer isn't currently writing to the L2ARC.
6030 * 4. The L2ARC entry wasn't evicted, which may
6031 * also have invalidated the vdev.
6032 * 5. This isn't prefetch or l2arc_noprefetch is 0.
6034 if (HDR_HAS_L2HDR(hdr
) &&
6035 !HDR_L2_WRITING(hdr
) && !HDR_L2_EVICTED(hdr
) &&
6036 !(l2arc_noprefetch
&&
6037 (*arc_flags
& ARC_FLAG_PREFETCH
))) {
6038 l2arc_read_callback_t
*cb
;
6042 DTRACE_PROBE1(l2arc__hit
, arc_buf_hdr_t
*, hdr
);
6043 ARCSTAT_BUMP(arcstat_l2_hits
);
6044 hdr
->b_l2hdr
.b_hits
++;
6046 cb
= kmem_zalloc(sizeof (l2arc_read_callback_t
),
6048 cb
->l2rcb_hdr
= hdr
;
6051 cb
->l2rcb_flags
= zio_flags
;
6054 * When Compressed ARC is disabled, but the
6055 * L2ARC block is compressed, arc_hdr_size()
6056 * will have returned LSIZE rather than PSIZE.
6058 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
&&
6059 !HDR_COMPRESSION_ENABLED(hdr
) &&
6060 HDR_GET_PSIZE(hdr
) != 0) {
6061 size
= HDR_GET_PSIZE(hdr
);
6064 asize
= vdev_psize_to_asize(vd
, size
);
6065 if (asize
!= size
) {
6066 abd
= abd_alloc_for_io(asize
,
6067 HDR_ISTYPE_METADATA(hdr
));
6068 cb
->l2rcb_abd
= abd
;
6073 ASSERT(addr
>= VDEV_LABEL_START_SIZE
&&
6074 addr
+ asize
<= vd
->vdev_psize
-
6075 VDEV_LABEL_END_SIZE
);
6078 * l2arc read. The SCL_L2ARC lock will be
6079 * released by l2arc_read_done().
6080 * Issue a null zio if the underlying buffer
6081 * was squashed to zero size by compression.
6083 ASSERT3U(arc_hdr_get_compress(hdr
), !=,
6084 ZIO_COMPRESS_EMPTY
);
6085 rzio
= zio_read_phys(pio
, vd
, addr
,
6088 l2arc_read_done
, cb
, priority
,
6089 zio_flags
| ZIO_FLAG_CANFAIL
|
6090 ZIO_FLAG_DONT_PROPAGATE
|
6091 ZIO_FLAG_DONT_RETRY
, B_FALSE
);
6092 acb
->acb_zio_head
= rzio
;
6094 if (hash_lock
!= NULL
)
6095 mutex_exit(hash_lock
);
6097 DTRACE_PROBE2(l2arc__read
, vdev_t
*, vd
,
6099 ARCSTAT_INCR(arcstat_l2_read_bytes
,
6100 HDR_GET_PSIZE(hdr
));
6102 if (*arc_flags
& ARC_FLAG_NOWAIT
) {
6107 ASSERT(*arc_flags
& ARC_FLAG_WAIT
);
6108 if (zio_wait(rzio
) == 0)
6111 /* l2arc read error; goto zio_read() */
6112 if (hash_lock
!= NULL
)
6113 mutex_enter(hash_lock
);
6115 DTRACE_PROBE1(l2arc__miss
,
6116 arc_buf_hdr_t
*, hdr
);
6117 ARCSTAT_BUMP(arcstat_l2_misses
);
6118 if (HDR_L2_WRITING(hdr
))
6119 ARCSTAT_BUMP(arcstat_l2_rw_clash
);
6120 spa_config_exit(spa
, SCL_L2ARC
, vd
);
6124 spa_config_exit(spa
, SCL_L2ARC
, vd
);
6127 * Only a spa with l2 should contribute to l2
6128 * miss stats. (Including the case of having a
6129 * faulted cache device - that's also a miss.)
6133 * Skip ARC stat bump for block pointers with
6134 * embedded data. The data are read from the
6136 * decode_embedded_bp_compressed().
6139 DTRACE_PROBE1(l2arc__miss
,
6140 arc_buf_hdr_t
*, hdr
);
6141 ARCSTAT_BUMP(arcstat_l2_misses
);
6146 rzio
= zio_read(pio
, spa
, bp
, hdr_abd
, size
,
6147 arc_read_done
, hdr
, priority
, zio_flags
, zb
);
6148 acb
->acb_zio_head
= rzio
;
6150 if (hash_lock
!= NULL
)
6151 mutex_exit(hash_lock
);
6153 if (*arc_flags
& ARC_FLAG_WAIT
) {
6154 rc
= zio_wait(rzio
);
6158 ASSERT(*arc_flags
& ARC_FLAG_NOWAIT
);
6163 /* embedded bps don't actually go to disk */
6165 spa_read_history_add(spa
, zb
, *arc_flags
);
6166 spl_fstrans_unmark(cookie
);
6171 done(NULL
, zb
, bp
, buf
, private);
6172 if (pio
&& rc
!= 0) {
6173 zio_t
*zio
= zio_null(pio
, spa
, NULL
, NULL
, NULL
, zio_flags
);
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
	arc_prune_t *p;

	p = kmem_alloc(sizeof (*p), KM_SLEEP);
	p->p_pfunc = func;
	p->p_private = private;
	list_link_init(&p->p_node);
	zfs_refcount_create(&p->p_refcnt);

	mutex_enter(&arc_prune_mtx);
	zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
	list_insert_head(&arc_prune_list, p);
	mutex_exit(&arc_prune_mtx);

	return (p);
}

void
arc_remove_prune_callback(arc_prune_t *p)
{
	boolean_t wait = B_FALSE;
	mutex_enter(&arc_prune_mtx);
	list_remove(&arc_prune_list, p);
	if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
		wait = B_TRUE;
	mutex_exit(&arc_prune_mtx);

	/* wait for arc_prune_task to finish */
	if (wait)
		taskq_wait_outstanding(arc_prune_taskq, 0);
	ASSERT0(zfs_refcount_count(&p->p_refcnt));
	zfs_refcount_destroy(&p->p_refcnt);
	kmem_free(p, sizeof (*p));
}
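/*
 * Illustrative usage sketch (not part of the original source): a
 * filesystem consumer typically registers a prune callback once and
 * removes it at teardown, e.g.:
 *
 *	arc_prune_t *ap = arc_add_prune_callback(my_prune_func, my_arg);
 *	...
 *	arc_remove_prune_callback(ap);
 *
 * Here my_prune_func is a hypothetical arc_prune_func_t that drops
 * cached metadata references (e.g. dentries/inodes) so the ARC can
 * reclaim metadata buffers; my_arg is whatever context it needs.
 */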
/*
 * Notify the arc that a block was freed, and thus will never be used again.
 */
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	uint64_t guid = spa_load_guid(spa);

	ASSERT(!BP_IS_EMBEDDED(bp));

	hdr = buf_hash_find(guid, bp, &hash_lock);
	if (hdr == NULL)
		return;

	/*
	 * We might be trying to free a block that is still doing I/O
	 * (i.e. prefetch) or has some other reference (i.e. a dedup-ed,
	 * dmu_sync-ed block).  A block may also have a reference if it is
	 * part of a dedup-ed, dmu_synced write.  The dmu_sync() function would
	 * have written the new block to its final resting place on disk but
	 * without the dedup flag set.  This would have left the hdr in the MRU
	 * state and discoverable.  When the txg finally syncs it detects that
	 * the block was overridden in open context and issues an override I/O.
	 * Since this is a dedup block, the override I/O will determine if the
	 * block is already in the DDT.  If so, then it will replace the io_bp
	 * with the bp from the DDT and allow the I/O to finish.  When the I/O
	 * reaches the done callback, dbuf_write_override_done, it will
	 * check to see if the io_bp and io_bp_override are identical.
	 * If they are not, then it indicates that the bp was replaced with
	 * the bp in the DDT and the override bp is freed.  This allows
	 * us to arrive here with a reference on a block that is being
	 * freed.  So if we have an I/O in progress, or a reference to
	 * this hdr, then we don't destroy the hdr.
	 */
	if (!HDR_HAS_L1HDR(hdr) ||
	    zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
		arc_change_state(arc_anon, hdr);
		arc_hdr_destroy(hdr);
		mutex_exit(hash_lock);
	} else {
		mutex_exit(hash_lock);
	}
}
6265 * Release this buffer from the cache, making it an anonymous buffer. This
6266 * must be done after a read and prior to modifying the buffer contents.
6267 * If the buffer has more than one reference, we must make
6268 * a new hdr for the buffer.
6271 arc_release(arc_buf_t
*buf
, const void *tag
)
6273 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
6276 * It would be nice to assert that if its DMU metadata (level >
6277 * 0 || it's the dnode file), then it must be syncing context.
6278 * But we don't know that information at this level.
6281 ASSERT(HDR_HAS_L1HDR(hdr
));
6284 * We don't grab the hash lock prior to this check, because if
6285 * the buffer's header is in the arc_anon state, it won't be
6286 * linked into the hash table.
6288 if (hdr
->b_l1hdr
.b_state
== arc_anon
) {
6289 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
6290 ASSERT(!HDR_IN_HASH_TABLE(hdr
));
6291 ASSERT(!HDR_HAS_L2HDR(hdr
));
6293 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, buf
);
6294 ASSERT(ARC_BUF_LAST(buf
));
6295 ASSERT3S(zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
), ==, 1);
6296 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
6298 hdr
->b_l1hdr
.b_arc_access
= 0;
6301 * If the buf is being overridden then it may already
6302 * have a hdr that is not empty.
6304 buf_discard_identity(hdr
);
6310 kmutex_t
*hash_lock
= HDR_LOCK(hdr
);
6311 mutex_enter(hash_lock
);
6314 * This assignment is only valid as long as the hash_lock is
6315 * held, we must be careful not to reference state or the
6316 * b_state field after dropping the lock.
6318 arc_state_t
*state
= hdr
->b_l1hdr
.b_state
;
6319 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
6320 ASSERT3P(state
, !=, arc_anon
);
6322 /* this buffer is not on any list */
6323 ASSERT3S(zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
), >, 0);
6325 if (HDR_HAS_L2HDR(hdr
)) {
6326 mutex_enter(&hdr
->b_l2hdr
.b_dev
->l2ad_mtx
);
6329 * We have to recheck this conditional again now that
6330 * we're holding the l2ad_mtx to prevent a race with
6331 * another thread which might be concurrently calling
6332 * l2arc_evict(). In that case, l2arc_evict() might have
6333 * destroyed the header's L2 portion as we were waiting
6334 * to acquire the l2ad_mtx.
6336 if (HDR_HAS_L2HDR(hdr
))
6337 arc_hdr_l2hdr_destroy(hdr
);
6339 mutex_exit(&hdr
->b_l2hdr
.b_dev
->l2ad_mtx
);
6343 * Do we have more than one buf?
6345 if (hdr
->b_l1hdr
.b_buf
!= buf
|| !ARC_BUF_LAST(buf
)) {
6346 arc_buf_hdr_t
*nhdr
;
6347 uint64_t spa
= hdr
->b_spa
;
6348 uint64_t psize
= HDR_GET_PSIZE(hdr
);
6349 uint64_t lsize
= HDR_GET_LSIZE(hdr
);
6350 boolean_t
protected = HDR_PROTECTED(hdr
);
6351 enum zio_compress compress
= arc_hdr_get_compress(hdr
);
6352 arc_buf_contents_t type
= arc_buf_type(hdr
);
6353 VERIFY3U(hdr
->b_type
, ==, type
);
6355 ASSERT(hdr
->b_l1hdr
.b_buf
!= buf
|| buf
->b_next
!= NULL
);
6356 VERIFY3S(remove_reference(hdr
, tag
), >, 0);
6358 if (arc_buf_is_shared(buf
) && !ARC_BUF_COMPRESSED(buf
)) {
6359 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, buf
);
6360 ASSERT(ARC_BUF_LAST(buf
));
6364 * Pull the data off of this hdr and attach it to
6365 * a new anonymous hdr. Also find the last buffer
6366 * in the hdr's buffer list.
6368 arc_buf_t
*lastbuf
= arc_buf_remove(hdr
, buf
);
6369 ASSERT3P(lastbuf
, !=, NULL
);
6372 * If the current arc_buf_t and the hdr are sharing their data
6373 * buffer, then we must stop sharing that block.
6375 if (arc_buf_is_shared(buf
)) {
6376 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, buf
);
6377 VERIFY(!arc_buf_is_shared(lastbuf
));
6380 * First, sever the block sharing relationship between
6381 * buf and the arc_buf_hdr_t.
6383 arc_unshare_buf(hdr
, buf
);
6386 * Now we need to recreate the hdr's b_pabd. Since we
6387 * have lastbuf handy, we try to share with it, but if
6388 * we can't then we allocate a new b_pabd and copy the
6389 * data from buf into it.
6391 if (arc_can_share(hdr
, lastbuf
)) {
6392 arc_share_buf(hdr
, lastbuf
);
6394 arc_hdr_alloc_abd(hdr
, 0);
6395 abd_copy_from_buf(hdr
->b_l1hdr
.b_pabd
,
6396 buf
->b_data
, psize
);
6398 VERIFY3P(lastbuf
->b_data
, !=, NULL
);
6399 } else if (HDR_SHARED_DATA(hdr
)) {
6401 * Uncompressed shared buffers are always at the end
6402 * of the list. Compressed buffers don't have the
6403 * same requirements. This makes it hard to
6404 * simply assert that the lastbuf is shared so
6405 * we rely on the hdr's compression flags to determine
6406 * if we have a compressed, shared buffer.
6408 ASSERT(arc_buf_is_shared(lastbuf
) ||
6409 arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
);
6410 ASSERT(!ARC_BUF_SHARED(buf
));
6413 ASSERT(hdr
->b_l1hdr
.b_pabd
!= NULL
|| HDR_HAS_RABD(hdr
));
6414 ASSERT3P(state
, !=, arc_l2c_only
);
6416 (void) zfs_refcount_remove_many(&state
->arcs_size
[type
],
6417 arc_buf_size(buf
), buf
);
6419 if (zfs_refcount_is_zero(&hdr
->b_l1hdr
.b_refcnt
)) {
6420 ASSERT3P(state
, !=, arc_l2c_only
);
6421 (void) zfs_refcount_remove_many(
6422 &state
->arcs_esize
[type
],
6423 arc_buf_size(buf
), buf
);
6426 arc_cksum_verify(buf
);
6427 arc_buf_unwatch(buf
);
6429 /* if this is the last uncompressed buf free the checksum */
6430 if (!arc_hdr_has_uncompressed_buf(hdr
))
6431 arc_cksum_free(hdr
);
6433 mutex_exit(hash_lock
);
6435 nhdr
= arc_hdr_alloc(spa
, psize
, lsize
, protected,
6436 compress
, hdr
->b_complevel
, type
);
6437 ASSERT3P(nhdr
->b_l1hdr
.b_buf
, ==, NULL
);
6438 ASSERT0(zfs_refcount_count(&nhdr
->b_l1hdr
.b_refcnt
));
6439 VERIFY3U(nhdr
->b_type
, ==, type
);
6440 ASSERT(!HDR_SHARED_DATA(nhdr
));
6442 nhdr
->b_l1hdr
.b_buf
= buf
;
6443 (void) zfs_refcount_add(&nhdr
->b_l1hdr
.b_refcnt
, tag
);
6446 (void) zfs_refcount_add_many(&arc_anon
->arcs_size
[type
],
6447 arc_buf_size(buf
), buf
);
6449 ASSERT(zfs_refcount_count(&hdr
->b_l1hdr
.b_refcnt
) == 1);
6450 /* protected by hash lock, or hdr is on arc_anon */
6451 ASSERT(!multilist_link_active(&hdr
->b_l1hdr
.b_arc_node
));
6452 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
6453 hdr
->b_l1hdr
.b_mru_hits
= 0;
6454 hdr
->b_l1hdr
.b_mru_ghost_hits
= 0;
6455 hdr
->b_l1hdr
.b_mfu_hits
= 0;
6456 hdr
->b_l1hdr
.b_mfu_ghost_hits
= 0;
6457 arc_change_state(arc_anon
, hdr
);
6458 hdr
->b_l1hdr
.b_arc_access
= 0;
6460 mutex_exit(hash_lock
);
6461 buf_discard_identity(hdr
);
boolean_t
arc_released(arc_buf_t *buf)
{
	return (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
}

int
arc_referenced(arc_buf_t *buf)
{
	return (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
}
6482 arc_write_ready(zio_t
*zio
)
6484 arc_write_callback_t
*callback
= zio
->io_private
;
6485 arc_buf_t
*buf
= callback
->awcb_buf
;
6486 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
6487 blkptr_t
*bp
= zio
->io_bp
;
6488 uint64_t psize
= BP_IS_HOLE(bp
) ? 0 : BP_GET_PSIZE(bp
);
6489 fstrans_cookie_t cookie
= spl_fstrans_mark();
6491 ASSERT(HDR_HAS_L1HDR(hdr
));
6492 ASSERT(!zfs_refcount_is_zero(&buf
->b_hdr
->b_l1hdr
.b_refcnt
));
6493 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, NULL
);
6496 * If we're reexecuting this zio because the pool suspended, then
6497 * cleanup any state that was previously set the first time the
6498 * callback was invoked.
6500 if (zio
->io_flags
& ZIO_FLAG_REEXECUTED
) {
6501 arc_cksum_free(hdr
);
6502 arc_buf_unwatch(buf
);
6503 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
6504 if (arc_buf_is_shared(buf
)) {
6505 arc_unshare_buf(hdr
, buf
);
6507 arc_hdr_free_abd(hdr
, B_FALSE
);
6511 if (HDR_HAS_RABD(hdr
))
6512 arc_hdr_free_abd(hdr
, B_TRUE
);
6514 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
6515 ASSERT(!HDR_HAS_RABD(hdr
));
6516 ASSERT(!HDR_SHARED_DATA(hdr
));
6517 ASSERT(!arc_buf_is_shared(buf
));
6519 callback
->awcb_ready(zio
, buf
, callback
->awcb_private
);
6521 if (HDR_IO_IN_PROGRESS(hdr
)) {
6522 ASSERT(zio
->io_flags
& ZIO_FLAG_REEXECUTED
);
6524 arc_hdr_set_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
6525 add_reference(hdr
, hdr
); /* For IO_IN_PROGRESS. */
6528 if (BP_IS_PROTECTED(bp
) != !!HDR_PROTECTED(hdr
))
6529 hdr
= arc_hdr_realloc_crypt(hdr
, BP_IS_PROTECTED(bp
));
6531 if (BP_IS_PROTECTED(bp
)) {
6532 /* ZIL blocks are written through zio_rewrite */
6533 ASSERT3U(BP_GET_TYPE(bp
), !=, DMU_OT_INTENT_LOG
);
6534 ASSERT(HDR_PROTECTED(hdr
));
6536 if (BP_SHOULD_BYTESWAP(bp
)) {
6537 if (BP_GET_LEVEL(bp
) > 0) {
6538 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_UINT64
;
6540 hdr
->b_l1hdr
.b_byteswap
=
6541 DMU_OT_BYTESWAP(BP_GET_TYPE(bp
));
6544 hdr
->b_l1hdr
.b_byteswap
= DMU_BSWAP_NUMFUNCS
;
6547 hdr
->b_crypt_hdr
.b_ot
= BP_GET_TYPE(bp
);
6548 hdr
->b_crypt_hdr
.b_dsobj
= zio
->io_bookmark
.zb_objset
;
6549 zio_crypt_decode_params_bp(bp
, hdr
->b_crypt_hdr
.b_salt
,
6550 hdr
->b_crypt_hdr
.b_iv
);
6551 zio_crypt_decode_mac_bp(bp
, hdr
->b_crypt_hdr
.b_mac
);
6555 * If this block was written for raw encryption but the zio layer
6556 * ended up only authenticating it, adjust the buffer flags now.
6558 if (BP_IS_AUTHENTICATED(bp
) && ARC_BUF_ENCRYPTED(buf
)) {
6559 arc_hdr_set_flags(hdr
, ARC_FLAG_NOAUTH
);
6560 buf
->b_flags
&= ~ARC_BUF_FLAG_ENCRYPTED
;
6561 if (BP_GET_COMPRESS(bp
) == ZIO_COMPRESS_OFF
)
6562 buf
->b_flags
&= ~ARC_BUF_FLAG_COMPRESSED
;
6563 } else if (BP_IS_HOLE(bp
) && ARC_BUF_ENCRYPTED(buf
)) {
6564 buf
->b_flags
&= ~ARC_BUF_FLAG_ENCRYPTED
;
6565 buf
->b_flags
&= ~ARC_BUF_FLAG_COMPRESSED
;
6568 /* this must be done after the buffer flags are adjusted */
6569 arc_cksum_compute(buf
);
6571 enum zio_compress compress
;
6572 if (BP_IS_HOLE(bp
) || BP_IS_EMBEDDED(bp
)) {
6573 compress
= ZIO_COMPRESS_OFF
;
6575 ASSERT3U(HDR_GET_LSIZE(hdr
), ==, BP_GET_LSIZE(bp
));
6576 compress
= BP_GET_COMPRESS(bp
);
6578 HDR_SET_PSIZE(hdr
, psize
);
6579 arc_hdr_set_compress(hdr
, compress
);
6580 hdr
->b_complevel
= zio
->io_prop
.zp_complevel
;
6582 if (zio
->io_error
!= 0 || psize
== 0)
6586 * Fill the hdr with data. If the buffer is encrypted we have no choice
6587 * but to copy the data into b_radb. If the hdr is compressed, the data
6588 * we want is available from the zio, otherwise we can take it from
6591 * We might be able to share the buf's data with the hdr here. However,
6592 * doing so would cause the ARC to be full of linear ABDs if we write a
6593 * lot of shareable data. As a compromise, we check whether scattered
6594 * ABDs are allowed, and assume that if they are then the user wants
6595 * the ARC to be primarily filled with them regardless of the data being
6596 * written. Therefore, if they're allowed then we allocate one and copy
6597 * the data into it; otherwise, we share the data directly if we can.
6599 if (ARC_BUF_ENCRYPTED(buf
)) {
6600 ASSERT3U(psize
, >, 0);
6601 ASSERT(ARC_BUF_COMPRESSED(buf
));
6602 arc_hdr_alloc_abd(hdr
, ARC_HDR_ALLOC_RDATA
|
6603 ARC_HDR_USE_RESERVE
);
6604 abd_copy(hdr
->b_crypt_hdr
.b_rabd
, zio
->io_abd
, psize
);
6605 } else if (!(HDR_UNCACHED(hdr
) ||
6606 abd_size_alloc_linear(arc_buf_size(buf
))) ||
6607 !arc_can_share(hdr
, buf
)) {
6609 * Ideally, we would always copy the io_abd into b_pabd, but the
6610 * user may have disabled compressed ARC, thus we must check the
6611 * hdr's compression setting rather than the io_bp's.
6613 if (BP_IS_ENCRYPTED(bp
)) {
6614 ASSERT3U(psize
, >, 0);
6615 arc_hdr_alloc_abd(hdr
, ARC_HDR_ALLOC_RDATA
|
6616 ARC_HDR_USE_RESERVE
);
6617 abd_copy(hdr
->b_crypt_hdr
.b_rabd
, zio
->io_abd
, psize
);
6618 } else if (arc_hdr_get_compress(hdr
) != ZIO_COMPRESS_OFF
&&
6619 !ARC_BUF_COMPRESSED(buf
)) {
6620 ASSERT3U(psize
, >, 0);
6621 arc_hdr_alloc_abd(hdr
, ARC_HDR_USE_RESERVE
);
6622 abd_copy(hdr
->b_l1hdr
.b_pabd
, zio
->io_abd
, psize
);
6624 ASSERT3U(zio
->io_orig_size
, ==, arc_hdr_size(hdr
));
6625 arc_hdr_alloc_abd(hdr
, ARC_HDR_USE_RESERVE
);
6626 abd_copy_from_buf(hdr
->b_l1hdr
.b_pabd
, buf
->b_data
,
6630 ASSERT3P(buf
->b_data
, ==, abd_to_buf(zio
->io_orig_abd
));
6631 ASSERT3U(zio
->io_orig_size
, ==, arc_buf_size(buf
));
6632 ASSERT3P(hdr
->b_l1hdr
.b_buf
, ==, buf
);
6633 ASSERT(ARC_BUF_LAST(buf
));
6635 arc_share_buf(hdr
, buf
);
6639 arc_hdr_verify(hdr
, bp
);
6640 spl_fstrans_unmark(cookie
);
static void
arc_write_children_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;

	callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
6653 arc_write_done(zio_t
*zio
)
6655 arc_write_callback_t
*callback
= zio
->io_private
;
6656 arc_buf_t
*buf
= callback
->awcb_buf
;
6657 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
6659 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
6661 if (zio
->io_error
== 0) {
6662 arc_hdr_verify(hdr
, zio
->io_bp
);
6664 if (BP_IS_HOLE(zio
->io_bp
) || BP_IS_EMBEDDED(zio
->io_bp
)) {
6665 buf_discard_identity(hdr
);
6667 hdr
->b_dva
= *BP_IDENTITY(zio
->io_bp
);
6668 hdr
->b_birth
= BP_PHYSICAL_BIRTH(zio
->io_bp
);
6671 ASSERT(HDR_EMPTY(hdr
));
6675 * If the block to be written was all-zero or compressed enough to be
6676 * embedded in the BP, no write was performed so there will be no
6677 * dva/birth/checksum. The buffer must therefore remain anonymous
6680 if (!HDR_EMPTY(hdr
)) {
6681 arc_buf_hdr_t
*exists
;
6682 kmutex_t
*hash_lock
;
6684 ASSERT3U(zio
->io_error
, ==, 0);
6686 arc_cksum_verify(buf
);
6688 exists
= buf_hash_insert(hdr
, &hash_lock
);
6689 if (exists
!= NULL
) {
6691 * This can only happen if we overwrite for
6692 * sync-to-convergence, because we remove
6693 * buffers from the hash table when we arc_free().
6695 if (zio
->io_flags
& ZIO_FLAG_IO_REWRITE
) {
6696 if (!BP_EQUAL(&zio
->io_bp_orig
, zio
->io_bp
))
6697 panic("bad overwrite, hdr=%p exists=%p",
6698 (void *)hdr
, (void *)exists
);
6699 ASSERT(zfs_refcount_is_zero(
6700 &exists
->b_l1hdr
.b_refcnt
));
6701 arc_change_state(arc_anon
, exists
);
6702 arc_hdr_destroy(exists
);
6703 mutex_exit(hash_lock
);
6704 exists
= buf_hash_insert(hdr
, &hash_lock
);
6705 ASSERT3P(exists
, ==, NULL
);
6706 } else if (zio
->io_flags
& ZIO_FLAG_NOPWRITE
) {
6708 ASSERT(zio
->io_prop
.zp_nopwrite
);
6709 if (!BP_EQUAL(&zio
->io_bp_orig
, zio
->io_bp
))
6710 panic("bad nopwrite, hdr=%p exists=%p",
6711 (void *)hdr
, (void *)exists
);
6714 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, NULL
);
6715 ASSERT(ARC_BUF_LAST(hdr
->b_l1hdr
.b_buf
));
6716 ASSERT(hdr
->b_l1hdr
.b_state
== arc_anon
);
6717 ASSERT(BP_GET_DEDUP(zio
->io_bp
));
6718 ASSERT(BP_GET_LEVEL(zio
->io_bp
) == 0);
6721 arc_hdr_clear_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
6722 VERIFY3S(remove_reference(hdr
, hdr
), >, 0);
6723 /* if it's not anon, we are doing a scrub */
6724 if (exists
== NULL
&& hdr
->b_l1hdr
.b_state
== arc_anon
)
6725 arc_access(hdr
, 0, B_FALSE
);
6726 mutex_exit(hash_lock
);
6728 arc_hdr_clear_flags(hdr
, ARC_FLAG_IO_IN_PROGRESS
);
6729 VERIFY3S(remove_reference(hdr
, hdr
), >, 0);
6732 callback
->awcb_done(zio
, buf
, callback
->awcb_private
);
6734 abd_free(zio
->io_abd
);
6735 kmem_free(callback
, sizeof (arc_write_callback_t
));
6739 arc_write(zio_t
*pio
, spa_t
*spa
, uint64_t txg
,
6740 blkptr_t
*bp
, arc_buf_t
*buf
, boolean_t uncached
, boolean_t l2arc
,
6741 const zio_prop_t
*zp
, arc_write_done_func_t
*ready
,
6742 arc_write_done_func_t
*children_ready
, arc_write_done_func_t
*done
,
6743 void *private, zio_priority_t priority
, int zio_flags
,
6744 const zbookmark_phys_t
*zb
)
6746 arc_buf_hdr_t
*hdr
= buf
->b_hdr
;
6747 arc_write_callback_t
*callback
;
6749 zio_prop_t localprop
= *zp
;
6751 ASSERT3P(ready
, !=, NULL
);
6752 ASSERT3P(done
, !=, NULL
);
6753 ASSERT(!HDR_IO_ERROR(hdr
));
6754 ASSERT(!HDR_IO_IN_PROGRESS(hdr
));
6755 ASSERT3P(hdr
->b_l1hdr
.b_acb
, ==, NULL
);
6756 ASSERT3P(hdr
->b_l1hdr
.b_buf
, !=, NULL
);
6758 arc_hdr_set_flags(hdr
, ARC_FLAG_UNCACHED
);
6760 arc_hdr_set_flags(hdr
, ARC_FLAG_L2CACHE
);
6762 if (ARC_BUF_ENCRYPTED(buf
)) {
6763 ASSERT(ARC_BUF_COMPRESSED(buf
));
6764 localprop
.zp_encrypt
= B_TRUE
;
6765 localprop
.zp_compress
= HDR_GET_COMPRESS(hdr
);
6766 localprop
.zp_complevel
= hdr
->b_complevel
;
6767 localprop
.zp_byteorder
=
6768 (hdr
->b_l1hdr
.b_byteswap
== DMU_BSWAP_NUMFUNCS
) ?
6769 ZFS_HOST_BYTEORDER
: !ZFS_HOST_BYTEORDER
;
6770 memcpy(localprop
.zp_salt
, hdr
->b_crypt_hdr
.b_salt
,
6772 memcpy(localprop
.zp_iv
, hdr
->b_crypt_hdr
.b_iv
,
6774 memcpy(localprop
.zp_mac
, hdr
->b_crypt_hdr
.b_mac
,
6776 if (DMU_OT_IS_ENCRYPTED(localprop
.zp_type
)) {
6777 localprop
.zp_nopwrite
= B_FALSE
;
6778 localprop
.zp_copies
=
6779 MIN(localprop
.zp_copies
, SPA_DVAS_PER_BP
- 1);
6781 zio_flags
|= ZIO_FLAG_RAW
;
6782 } else if (ARC_BUF_COMPRESSED(buf
)) {
6783 ASSERT3U(HDR_GET_LSIZE(hdr
), !=, arc_buf_size(buf
));
6784 localprop
.zp_compress
= HDR_GET_COMPRESS(hdr
);
6785 localprop
.zp_complevel
= hdr
->b_complevel
;
6786 zio_flags
|= ZIO_FLAG_RAW_COMPRESS
;
6788 callback
= kmem_zalloc(sizeof (arc_write_callback_t
), KM_SLEEP
);
6789 callback
->awcb_ready
= ready
;
6790 callback
->awcb_children_ready
= children_ready
;
6791 callback
->awcb_done
= done
;
6792 callback
->awcb_private
= private;
6793 callback
->awcb_buf
= buf
;
6796 * The hdr's b_pabd is now stale, free it now. A new data block
6797 * will be allocated when the zio pipeline calls arc_write_ready().
6799 if (hdr
->b_l1hdr
.b_pabd
!= NULL
) {
6801 * If the buf is currently sharing the data block with
6802 * the hdr then we need to break that relationship here.
6803 * The hdr will remain with a NULL data pointer and the
6804 * buf will take sole ownership of the block.
6806 if (arc_buf_is_shared(buf
)) {
6807 arc_unshare_buf(hdr
, buf
);
6809 arc_hdr_free_abd(hdr
, B_FALSE
);
6811 VERIFY3P(buf
->b_data
, !=, NULL
);
6814 if (HDR_HAS_RABD(hdr
))
6815 arc_hdr_free_abd(hdr
, B_TRUE
);
6817 if (!(zio_flags
& ZIO_FLAG_RAW
))
6818 arc_hdr_set_compress(hdr
, ZIO_COMPRESS_OFF
);
6820 ASSERT(!arc_buf_is_shared(buf
));
6821 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, ==, NULL
);
6823 zio
= zio_write(pio
, spa
, txg
, bp
,
6824 abd_get_from_buf(buf
->b_data
, HDR_GET_LSIZE(hdr
)),
6825 HDR_GET_LSIZE(hdr
), arc_buf_size(buf
), &localprop
, arc_write_ready
,
6826 (children_ready
!= NULL
) ? arc_write_children_ready
: NULL
,
6827 arc_write_done
, callback
, priority
, zio_flags
, zb
);
void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (!arc_no_grow &&
	    reserve > arc_c / 4 &&
	    reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
		arc_c = MIN(arc_c_max, reserve * 4);

	/*
	 * Throttle when the calculated memory footprint for the TXG
	 * exceeds the target ARC size.
	 */
	if (reserve > arc_c) {
		DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
		return (SET_ERROR(ERESTART));
	}

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */

	/* assert that it has not wrapped around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

	anon_size = MAX((int64_t)
	    (zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_DATA]) +
	    zfs_refcount_count(&arc_anon->arcs_size[ARC_BUFC_METADATA]) -
	    arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(spa, reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 *
	 * In the case of one pool being built on another pool, we want
	 * to make sure we don't end up throttling the lower (backing)
	 * pool when the upper pool is the majority contributor to dirty
	 * data.  To insure we make forward progress during throttling, we
	 * also check the current pool's net dirty data and only throttle
	 * if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
	 * data in the cache.
	 *
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */
	uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
	uint64_t spa_dirty_anon = spa_dirty_data(spa);
	uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
	if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
	    anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
	    spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
		uint64_t meta_esize = zfs_refcount_count(
		    &arc_anon->arcs_esize[ARC_BUFC_METADATA]);
		uint64_t data_esize =
		    zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
		    (u_longlong_t)arc_tempreserve >> 10,
		    (u_longlong_t)meta_esize >> 10,
		    (u_longlong_t)data_esize >> 10,
		    (u_longlong_t)reserve >> 10,
		    (u_longlong_t)rarc_c >> 10);
		DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
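/*
 * Worked example (illustrative, assuming the default percentages of
 * roughly 50%/25%/20% for zfs_arc_dirty_limit_percent,
 * zfs_arc_anon_limit_percent and zfs_arc_pool_dirty_percent): with a
 * warm ARC target of 8 GiB, a reservation is throttled with ERESTART
 * only if total dirty data (reserve + arc_tempreserve + anon_size)
 * exceeds ~4 GiB, anonymous data alone exceeds ~2 GiB, and this pool
 * contributes more than 20% of that anonymous dirty data.
 */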
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *data, kstat_named_t *metadata,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	data->value.ui64 =
	    zfs_refcount_count(&state->arcs_size[ARC_BUFC_DATA]);
	metadata->value.ui64 =
	    zfs_refcount_count(&state->arcs_size[ARC_BUFC_METADATA]);
	size->value.ui64 = data->value.ui64 + metadata->value.ui64;
	evict_data->value.ui64 =
	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
	evict_metadata->value.ui64 =
	    zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
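/*
 * These per-state sizes feed the "arcstats" kstat filled in below by
 * arc_kstat_update(); on Linux they are typically visible under
 * /proc/spl/kstat/zfs/arcstats (e.g. anon_size, mru_size, mfu_size and
 * their evictable_* counterparts).
 */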
6941 arc_kstat_update(kstat_t
*ksp
, int rw
)
6943 arc_stats_t
*as
= ksp
->ks_data
;
6945 if (rw
== KSTAT_WRITE
)
6946 return (SET_ERROR(EACCES
));
6948 as
->arcstat_hits
.value
.ui64
=
6949 wmsum_value(&arc_sums
.arcstat_hits
);
6950 as
->arcstat_iohits
.value
.ui64
=
6951 wmsum_value(&arc_sums
.arcstat_iohits
);
6952 as
->arcstat_misses
.value
.ui64
=
6953 wmsum_value(&arc_sums
.arcstat_misses
);
6954 as
->arcstat_demand_data_hits
.value
.ui64
=
6955 wmsum_value(&arc_sums
.arcstat_demand_data_hits
);
6956 as
->arcstat_demand_data_iohits
.value
.ui64
=
6957 wmsum_value(&arc_sums
.arcstat_demand_data_iohits
);
6958 as
->arcstat_demand_data_misses
.value
.ui64
=
6959 wmsum_value(&arc_sums
.arcstat_demand_data_misses
);
6960 as
->arcstat_demand_metadata_hits
.value
.ui64
=
6961 wmsum_value(&arc_sums
.arcstat_demand_metadata_hits
);
6962 as
->arcstat_demand_metadata_iohits
.value
.ui64
=
6963 wmsum_value(&arc_sums
.arcstat_demand_metadata_iohits
);
6964 as
->arcstat_demand_metadata_misses
.value
.ui64
=
6965 wmsum_value(&arc_sums
.arcstat_demand_metadata_misses
);
6966 as
->arcstat_prefetch_data_hits
.value
.ui64
=
6967 wmsum_value(&arc_sums
.arcstat_prefetch_data_hits
);
6968 as
->arcstat_prefetch_data_iohits
.value
.ui64
=
6969 wmsum_value(&arc_sums
.arcstat_prefetch_data_iohits
);
6970 as
->arcstat_prefetch_data_misses
.value
.ui64
=
6971 wmsum_value(&arc_sums
.arcstat_prefetch_data_misses
);
6972 as
->arcstat_prefetch_metadata_hits
.value
.ui64
=
6973 wmsum_value(&arc_sums
.arcstat_prefetch_metadata_hits
);
6974 as
->arcstat_prefetch_metadata_iohits
.value
.ui64
=
6975 wmsum_value(&arc_sums
.arcstat_prefetch_metadata_iohits
);
6976 as
->arcstat_prefetch_metadata_misses
.value
.ui64
=
6977 wmsum_value(&arc_sums
.arcstat_prefetch_metadata_misses
);
6978 as
->arcstat_mru_hits
.value
.ui64
=
6979 wmsum_value(&arc_sums
.arcstat_mru_hits
);
6980 as
->arcstat_mru_ghost_hits
.value
.ui64
=
6981 wmsum_value(&arc_sums
.arcstat_mru_ghost_hits
);
6982 as
->arcstat_mfu_hits
.value
.ui64
=
6983 wmsum_value(&arc_sums
.arcstat_mfu_hits
);
6984 as
->arcstat_mfu_ghost_hits
.value
.ui64
=
6985 wmsum_value(&arc_sums
.arcstat_mfu_ghost_hits
);
6986 as
->arcstat_uncached_hits
.value
.ui64
=
6987 wmsum_value(&arc_sums
.arcstat_uncached_hits
);
6988 as
->arcstat_deleted
.value
.ui64
=
6989 wmsum_value(&arc_sums
.arcstat_deleted
);
6990 as
->arcstat_mutex_miss
.value
.ui64
=
6991 wmsum_value(&arc_sums
.arcstat_mutex_miss
);
6992 as
->arcstat_access_skip
.value
.ui64
=
6993 wmsum_value(&arc_sums
.arcstat_access_skip
);
6994 as
->arcstat_evict_skip
.value
.ui64
=
6995 wmsum_value(&arc_sums
.arcstat_evict_skip
);
6996 as
->arcstat_evict_not_enough
.value
.ui64
=
6997 wmsum_value(&arc_sums
.arcstat_evict_not_enough
);
6998 as
->arcstat_evict_l2_cached
.value
.ui64
=
6999 wmsum_value(&arc_sums
.arcstat_evict_l2_cached
);
7000 as
->arcstat_evict_l2_eligible
.value
.ui64
=
7001 wmsum_value(&arc_sums
.arcstat_evict_l2_eligible
);
7002 as
->arcstat_evict_l2_eligible_mfu
.value
.ui64
=
7003 wmsum_value(&arc_sums
.arcstat_evict_l2_eligible_mfu
);
7004 as
->arcstat_evict_l2_eligible_mru
.value
.ui64
=
7005 wmsum_value(&arc_sums
.arcstat_evict_l2_eligible_mru
);
7006 as
->arcstat_evict_l2_ineligible
.value
.ui64
=
7007 wmsum_value(&arc_sums
.arcstat_evict_l2_ineligible
);
7008 as
->arcstat_evict_l2_skip
.value
.ui64
=
7009 wmsum_value(&arc_sums
.arcstat_evict_l2_skip
);
7010 as
->arcstat_hash_collisions
.value
.ui64
=
7011 wmsum_value(&arc_sums
.arcstat_hash_collisions
);
7012 as
->arcstat_hash_chains
.value
.ui64
=
7013 wmsum_value(&arc_sums
.arcstat_hash_chains
);
7014 as
->arcstat_size
.value
.ui64
=
7015 aggsum_value(&arc_sums
.arcstat_size
);
7016 as
->arcstat_compressed_size
.value
.ui64
=
7017 wmsum_value(&arc_sums
.arcstat_compressed_size
);
7018 as
->arcstat_uncompressed_size
.value
.ui64
=
7019 wmsum_value(&arc_sums
.arcstat_uncompressed_size
);
7020 as
->arcstat_overhead_size
.value
.ui64
=
7021 wmsum_value(&arc_sums
.arcstat_overhead_size
);
7022 as
->arcstat_hdr_size
.value
.ui64
=
7023 wmsum_value(&arc_sums
.arcstat_hdr_size
);
7024 as
->arcstat_data_size
.value
.ui64
=
7025 wmsum_value(&arc_sums
.arcstat_data_size
);
7026 as
->arcstat_metadata_size
.value
.ui64
=
7027 wmsum_value(&arc_sums
.arcstat_metadata_size
);
7028 as
->arcstat_dbuf_size
.value
.ui64
=
7029 wmsum_value(&arc_sums
.arcstat_dbuf_size
);
7030 #if defined(COMPAT_FREEBSD11)
7031 as
->arcstat_other_size
.value
.ui64
=
7032 wmsum_value(&arc_sums
.arcstat_bonus_size
) +
7033 wmsum_value(&arc_sums
.arcstat_dnode_size
) +
7034 wmsum_value(&arc_sums
.arcstat_dbuf_size
);
7037 arc_kstat_update_state(arc_anon
,
7038 &as
->arcstat_anon_size
,
7039 &as
->arcstat_anon_data
,
7040 &as
->arcstat_anon_metadata
,
7041 &as
->arcstat_anon_evictable_data
,
7042 &as
->arcstat_anon_evictable_metadata
);
7043 arc_kstat_update_state(arc_mru
,
7044 &as
->arcstat_mru_size
,
7045 &as
->arcstat_mru_data
,
7046 &as
->arcstat_mru_metadata
,
7047 &as
->arcstat_mru_evictable_data
,
7048 &as
->arcstat_mru_evictable_metadata
);
7049 arc_kstat_update_state(arc_mru_ghost
,
7050 &as
->arcstat_mru_ghost_size
,
7051 &as
->arcstat_mru_ghost_data
,
7052 &as
->arcstat_mru_ghost_metadata
,
7053 &as
->arcstat_mru_ghost_evictable_data
,
7054 &as
->arcstat_mru_ghost_evictable_metadata
);
7055 arc_kstat_update_state(arc_mfu
,
7056 &as
->arcstat_mfu_size
,
7057 &as
->arcstat_mfu_data
,
7058 &as
->arcstat_mfu_metadata
,
7059 &as
->arcstat_mfu_evictable_data
,
7060 &as
->arcstat_mfu_evictable_metadata
);
7061 arc_kstat_update_state(arc_mfu_ghost
,
7062 &as
->arcstat_mfu_ghost_size
,
7063 &as
->arcstat_mfu_ghost_data
,
7064 &as
->arcstat_mfu_ghost_metadata
,
7065 &as
->arcstat_mfu_ghost_evictable_data
,
7066 &as
->arcstat_mfu_ghost_evictable_metadata
);
7067 arc_kstat_update_state(arc_uncached
,
7068 &as
->arcstat_uncached_size
,
7069 &as
->arcstat_uncached_data
,
7070 &as
->arcstat_uncached_metadata
,
7071 &as
->arcstat_uncached_evictable_data
,
7072 &as
->arcstat_uncached_evictable_metadata
);
7074 as
->arcstat_dnode_size
.value
.ui64
=
7075 wmsum_value(&arc_sums
.arcstat_dnode_size
);
7076 as
->arcstat_bonus_size
.value
.ui64
=
7077 wmsum_value(&arc_sums
.arcstat_bonus_size
);
7078 as
->arcstat_l2_hits
.value
.ui64
=
7079 wmsum_value(&arc_sums
.arcstat_l2_hits
);
7080 as
->arcstat_l2_misses
.value
.ui64
=
7081 wmsum_value(&arc_sums
.arcstat_l2_misses
);
7082 as
->arcstat_l2_prefetch_asize
.value
.ui64
=
7083 wmsum_value(&arc_sums
.arcstat_l2_prefetch_asize
);
7084 as
->arcstat_l2_mru_asize
.value
.ui64
=
7085 wmsum_value(&arc_sums
.arcstat_l2_mru_asize
);
7086 as
->arcstat_l2_mfu_asize
.value
.ui64
=
7087 wmsum_value(&arc_sums
.arcstat_l2_mfu_asize
);
7088 as
->arcstat_l2_bufc_data_asize
.value
.ui64
=
7089 wmsum_value(&arc_sums
.arcstat_l2_bufc_data_asize
);
7090 as
->arcstat_l2_bufc_metadata_asize
.value
.ui64
=
7091 wmsum_value(&arc_sums
.arcstat_l2_bufc_metadata_asize
);
7092 as
->arcstat_l2_feeds
.value
.ui64
=
7093 wmsum_value(&arc_sums
.arcstat_l2_feeds
);
7094 as
->arcstat_l2_rw_clash
.value
.ui64
=
7095 wmsum_value(&arc_sums
.arcstat_l2_rw_clash
);
7096 as
->arcstat_l2_read_bytes
.value
.ui64
=
7097 wmsum_value(&arc_sums
.arcstat_l2_read_bytes
);
7098 as
->arcstat_l2_write_bytes
.value
.ui64
=
7099 wmsum_value(&arc_sums
.arcstat_l2_write_bytes
);
7100 as
->arcstat_l2_writes_sent
.value
.ui64
=
7101 wmsum_value(&arc_sums
.arcstat_l2_writes_sent
);
7102 as
->arcstat_l2_writes_done
.value
.ui64
=
7103 wmsum_value(&arc_sums
.arcstat_l2_writes_done
);
7104 as
->arcstat_l2_writes_error
.value
.ui64
=
7105 wmsum_value(&arc_sums
.arcstat_l2_writes_error
);
7106 as
->arcstat_l2_writes_lock_retry
.value
.ui64
=
7107 wmsum_value(&arc_sums
.arcstat_l2_writes_lock_retry
);
7108 as
->arcstat_l2_evict_lock_retry
.value
.ui64
=
7109 wmsum_value(&arc_sums
.arcstat_l2_evict_lock_retry
);
7110 as
->arcstat_l2_evict_reading
.value
.ui64
=
7111 wmsum_value(&arc_sums
.arcstat_l2_evict_reading
);
7112 as
->arcstat_l2_evict_l1cached
.value
.ui64
=
7113 wmsum_value(&arc_sums
.arcstat_l2_evict_l1cached
);
7114 as
->arcstat_l2_free_on_write
.value
.ui64
=
7115 wmsum_value(&arc_sums
.arcstat_l2_free_on_write
);
7116 as
->arcstat_l2_abort_lowmem
.value
.ui64
=
7117 wmsum_value(&arc_sums
.arcstat_l2_abort_lowmem
);
7118 as
->arcstat_l2_cksum_bad
.value
.ui64
=
7119 wmsum_value(&arc_sums
.arcstat_l2_cksum_bad
);
7120 as
->arcstat_l2_io_error
.value
.ui64
=
7121 wmsum_value(&arc_sums
.arcstat_l2_io_error
);
7122 as
->arcstat_l2_lsize
.value
.ui64
=
7123 wmsum_value(&arc_sums
.arcstat_l2_lsize
);
7124 as
->arcstat_l2_psize
.value
.ui64
=
7125 wmsum_value(&arc_sums
.arcstat_l2_psize
);
7126 as
->arcstat_l2_hdr_size
.value
.ui64
=
7127 aggsum_value(&arc_sums
.arcstat_l2_hdr_size
);
7128 as
->arcstat_l2_log_blk_writes
.value
.ui64
=
7129 wmsum_value(&arc_sums
.arcstat_l2_log_blk_writes
);
7130 as
->arcstat_l2_log_blk_asize
.value
.ui64
=
7131 wmsum_value(&arc_sums
.arcstat_l2_log_blk_asize
);
7132 as
->arcstat_l2_log_blk_count
.value
.ui64
=
7133 wmsum_value(&arc_sums
.arcstat_l2_log_blk_count
);
7134 as
->arcstat_l2_rebuild_success
.value
.ui64
=
7135 wmsum_value(&arc_sums
.arcstat_l2_rebuild_success
);
7136 as
->arcstat_l2_rebuild_abort_unsupported
.value
.ui64
=
7137 wmsum_value(&arc_sums
.arcstat_l2_rebuild_abort_unsupported
);
7138 as
->arcstat_l2_rebuild_abort_io_errors
.value
.ui64
=
7139 wmsum_value(&arc_sums
.arcstat_l2_rebuild_abort_io_errors
);
7140 as
->arcstat_l2_rebuild_abort_dh_errors
.value
.ui64
=
7141 wmsum_value(&arc_sums
.arcstat_l2_rebuild_abort_dh_errors
);
7142 as
->arcstat_l2_rebuild_abort_cksum_lb_errors
.value
.ui64
=
7143 wmsum_value(&arc_sums
.arcstat_l2_rebuild_abort_cksum_lb_errors
);
7144 as
->arcstat_l2_rebuild_abort_lowmem
.value
.ui64
=
7145 wmsum_value(&arc_sums
.arcstat_l2_rebuild_abort_lowmem
);
7146 as
->arcstat_l2_rebuild_size
.value
.ui64
=
7147 wmsum_value(&arc_sums
.arcstat_l2_rebuild_size
);
7148 as
->arcstat_l2_rebuild_asize
.value
.ui64
=
7149 wmsum_value(&arc_sums
.arcstat_l2_rebuild_asize
);
7150 as
->arcstat_l2_rebuild_bufs
.value
.ui64
=
7151 wmsum_value(&arc_sums
.arcstat_l2_rebuild_bufs
);
7152 as
->arcstat_l2_rebuild_bufs_precached
.value
.ui64
=
7153 wmsum_value(&arc_sums
.arcstat_l2_rebuild_bufs_precached
);
7154 as
->arcstat_l2_rebuild_log_blks
.value
.ui64
=
7155 wmsum_value(&arc_sums
.arcstat_l2_rebuild_log_blks
);
7156 as
->arcstat_memory_throttle_count
.value
.ui64
=
7157 wmsum_value(&arc_sums
.arcstat_memory_throttle_count
);
7158 as
->arcstat_memory_direct_count
.value
.ui64
=
7159 wmsum_value(&arc_sums
.arcstat_memory_direct_count
);
7160 as
->arcstat_memory_indirect_count
.value
.ui64
=
7161 wmsum_value(&arc_sums
.arcstat_memory_indirect_count
);
7163 as
->arcstat_memory_all_bytes
.value
.ui64
=
7165 as
->arcstat_memory_free_bytes
.value
.ui64
=
7167 as
->arcstat_memory_available_bytes
.value
.i64
=
7168 arc_available_memory();
7170 as
->arcstat_prune
.value
.ui64
=
7171 wmsum_value(&arc_sums
.arcstat_prune
);
7172 as
->arcstat_meta_used
.value
.ui64
=
7173 wmsum_value(&arc_sums
.arcstat_meta_used
);
7174 as
->arcstat_async_upgrade_sync
.value
.ui64
=
7175 wmsum_value(&arc_sums
.arcstat_async_upgrade_sync
);
7176 as
->arcstat_predictive_prefetch
.value
.ui64
=
7177 wmsum_value(&arc_sums
.arcstat_predictive_prefetch
);
7178 as
->arcstat_demand_hit_predictive_prefetch
.value
.ui64
=
7179 wmsum_value(&arc_sums
.arcstat_demand_hit_predictive_prefetch
);
7180 as
->arcstat_demand_iohit_predictive_prefetch
.value
.ui64
=
7181 wmsum_value(&arc_sums
.arcstat_demand_iohit_predictive_prefetch
);
7182 as
->arcstat_prescient_prefetch
.value
.ui64
=
7183 wmsum_value(&arc_sums
.arcstat_prescient_prefetch
);
7184 as
->arcstat_demand_hit_prescient_prefetch
.value
.ui64
=
7185 wmsum_value(&arc_sums
.arcstat_demand_hit_prescient_prefetch
);
7186 as
->arcstat_demand_iohit_prescient_prefetch
.value
.ui64
=
7187 wmsum_value(&arc_sums
.arcstat_demand_iohit_prescient_prefetch
);
7188 as
->arcstat_raw_size
.value
.ui64
=
7189 wmsum_value(&arc_sums
.arcstat_raw_size
);
7190 as
->arcstat_cached_only_in_progress
.value
.ui64
=
7191 wmsum_value(&arc_sums
.arcstat_cached_only_in_progress
);
7192 as
->arcstat_abd_chunk_waste_size
.value
.ui64
=
7193 wmsum_value(&arc_sums
.arcstat_abd_chunk_waste_size
);
7199 * This function *must* return indices evenly distributed between all
7200 * sublists of the multilist. This is needed due to how the ARC eviction
7201 * code is laid out; arc_evict_state() assumes ARC buffers are evenly
7202 * distributed between all sublists and uses this assumption when
7203 * deciding which sublist to evict from and how much to evict from it.
7206 arc_state_multilist_index_func(multilist_t
*ml
, void *obj
)
7208 arc_buf_hdr_t
*hdr
= obj
;
7211 * We rely on b_dva to generate evenly distributed index
7212 * numbers using buf_hash below. So, as an added precaution,
7213 * let's make sure we never add empty buffers to the arc lists.
7215 ASSERT(!HDR_EMPTY(hdr
));
7218 * The assumption here, is the hash value for a given
7219 * arc_buf_hdr_t will remain constant throughout its lifetime
7220 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
7221 * Thus, we don't need to store the header's sublist index
7222 * on insertion, as this index can be recalculated on removal.
7224 * Also, the low order bits of the hash value are thought to be
7225 * distributed evenly. Otherwise, in the case that the multilist
7226 * has a power of two number of sublists, each sublists' usage
7227 * would not be evenly distributed. In this context full 64bit
7228 * division would be a waste of time, so limit it to 32 bits.
7230 return ((unsigned int)buf_hash(hdr
->b_spa
, &hdr
->b_dva
, hdr
->b_birth
) %
7231 multilist_get_num_sublists(ml
));
7235 arc_state_l2c_multilist_index_func(multilist_t
*ml
, void *obj
)
7237 panic("Header %p insert into arc_l2c_only %p", obj
, ml
);
7240 #define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
7241 if ((do_warn) && (tuning) && ((tuning) != (value))) { \
7243 "ignoring tunable %s (using %llu instead)", \
7244 (#tuning), (u_longlong_t)(value)); \
7249 * Called during module initialization and periodically thereafter to
7250 * apply reasonable changes to the exposed performance tunings. Can also be
7251 * called explicitly by param_set_arc_*() functions when ARC tunables are
7252 * updated manually. Non-zero zfs_* values which differ from the currently set
7253 * values will be applied.
7256 arc_tuning_update(boolean_t verbose
)
7258 uint64_t allmem
= arc_all_memory();
7260 /* Valid range: 32M - <arc_c_max> */
7261 if ((zfs_arc_min
) && (zfs_arc_min
!= arc_c_min
) &&
7262 (zfs_arc_min
>= 2ULL << SPA_MAXBLOCKSHIFT
) &&
7263 (zfs_arc_min
<= arc_c_max
)) {
7264 arc_c_min
= zfs_arc_min
;
7265 arc_c
= MAX(arc_c
, arc_c_min
);
7267 WARN_IF_TUNING_IGNORED(zfs_arc_min
, arc_c_min
, verbose
);
7269 /* Valid range: 64M - <all physical memory> */
7270 if ((zfs_arc_max
) && (zfs_arc_max
!= arc_c_max
) &&
7271 (zfs_arc_max
>= MIN_ARC_MAX
) && (zfs_arc_max
< allmem
) &&
7272 (zfs_arc_max
> arc_c_min
)) {
7273 arc_c_max
= zfs_arc_max
;
7274 arc_c
= MIN(arc_c
, arc_c_max
);
7275 if (arc_dnode_limit
> arc_c_max
)
7276 arc_dnode_limit
= arc_c_max
;
7278 WARN_IF_TUNING_IGNORED(zfs_arc_max
, arc_c_max
, verbose
);
7280 /* Valid range: 0 - <all physical memory> */
7281 arc_dnode_limit
= zfs_arc_dnode_limit
? zfs_arc_dnode_limit
:
7282 MIN(zfs_arc_dnode_limit_percent
, 100) * arc_c_max
/ 100;
7283 WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit
, arc_dnode_limit
, verbose
);
7285 /* Valid range: 1 - N */
7286 if (zfs_arc_grow_retry
)
7287 arc_grow_retry
= zfs_arc_grow_retry
;
7289 /* Valid range: 1 - N */
7290 if (zfs_arc_shrink_shift
) {
7291 arc_shrink_shift
= zfs_arc_shrink_shift
;
7292 arc_no_grow_shift
= MIN(arc_no_grow_shift
, arc_shrink_shift
-1);
7295 /* Valid range: 1 - N ms */
7296 if (zfs_arc_min_prefetch_ms
)
7297 arc_min_prefetch_ms
= zfs_arc_min_prefetch_ms
;
7299 /* Valid range: 1 - N ms */
7300 if (zfs_arc_min_prescient_prefetch_ms
) {
7301 arc_min_prescient_prefetch_ms
=
7302 zfs_arc_min_prescient_prefetch_ms
;
7305 /* Valid range: 0 - 100 */
7306 if (zfs_arc_lotsfree_percent
<= 100)
7307 arc_lotsfree_percent
= zfs_arc_lotsfree_percent
;
7308 WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent
, arc_lotsfree_percent
,
7311 /* Valid range: 0 - <all physical memory> */
7312 if ((zfs_arc_sys_free
) && (zfs_arc_sys_free
!= arc_sys_free
))
7313 arc_sys_free
= MIN(zfs_arc_sys_free
, allmem
);
7314 WARN_IF_TUNING_IGNORED(zfs_arc_sys_free
, arc_sys_free
, verbose
);
7318 arc_state_multilist_init(multilist_t
*ml
,
7319 multilist_sublist_index_func_t
*index_func
, int *maxcountp
)
7321 multilist_create(ml
, sizeof (arc_buf_hdr_t
),
7322 offsetof(arc_buf_hdr_t
, b_l1hdr
.b_arc_node
), index_func
);
7323 *maxcountp
= MAX(*maxcountp
, multilist_get_num_sublists(ml
));
7327 arc_state_init(void)
7329 int num_sublists
= 0;
7331 arc_state_multilist_init(&arc_mru
->arcs_list
[ARC_BUFC_METADATA
],
7332 arc_state_multilist_index_func
, &num_sublists
);
7333 arc_state_multilist_init(&arc_mru
->arcs_list
[ARC_BUFC_DATA
],
7334 arc_state_multilist_index_func
, &num_sublists
);
7335 arc_state_multilist_init(&arc_mru_ghost
->arcs_list
[ARC_BUFC_METADATA
],
7336 arc_state_multilist_index_func
, &num_sublists
);
7337 arc_state_multilist_init(&arc_mru_ghost
->arcs_list
[ARC_BUFC_DATA
],
7338 arc_state_multilist_index_func
, &num_sublists
);
7339 arc_state_multilist_init(&arc_mfu
->arcs_list
[ARC_BUFC_METADATA
],
7340 arc_state_multilist_index_func
, &num_sublists
);
7341 arc_state_multilist_init(&arc_mfu
->arcs_list
[ARC_BUFC_DATA
],
7342 arc_state_multilist_index_func
, &num_sublists
);
7343 arc_state_multilist_init(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_METADATA
],
7344 arc_state_multilist_index_func
, &num_sublists
);
7345 arc_state_multilist_init(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_DATA
],
7346 arc_state_multilist_index_func
, &num_sublists
);
7347 arc_state_multilist_init(&arc_uncached
->arcs_list
[ARC_BUFC_METADATA
],
7348 arc_state_multilist_index_func
, &num_sublists
);
7349 arc_state_multilist_init(&arc_uncached
->arcs_list
[ARC_BUFC_DATA
],
7350 arc_state_multilist_index_func
, &num_sublists
);
7353 * L2 headers should never be on the L2 state list since they don't
7354 * have L1 headers allocated. Special index function asserts that.
7356 arc_state_multilist_init(&arc_l2c_only
->arcs_list
[ARC_BUFC_METADATA
],
7357 arc_state_l2c_multilist_index_func
, &num_sublists
);
7358 arc_state_multilist_init(&arc_l2c_only
->arcs_list
[ARC_BUFC_DATA
],
7359 arc_state_l2c_multilist_index_func
, &num_sublists
);
7362 * Keep track of the number of markers needed to reclaim buffers from
7363 * any ARC state. The markers will be pre-allocated so as to minimize
7364 * the number of memory allocations performed by the eviction thread.
7366 arc_state_evict_marker_count
= num_sublists
;
7368 zfs_refcount_create(&arc_anon
->arcs_esize
[ARC_BUFC_METADATA
]);
7369 zfs_refcount_create(&arc_anon
->arcs_esize
[ARC_BUFC_DATA
]);
7370 zfs_refcount_create(&arc_mru
->arcs_esize
[ARC_BUFC_METADATA
]);
7371 zfs_refcount_create(&arc_mru
->arcs_esize
[ARC_BUFC_DATA
]);
7372 zfs_refcount_create(&arc_mru_ghost
->arcs_esize
[ARC_BUFC_METADATA
]);
7373 zfs_refcount_create(&arc_mru_ghost
->arcs_esize
[ARC_BUFC_DATA
]);
7374 zfs_refcount_create(&arc_mfu
->arcs_esize
[ARC_BUFC_METADATA
]);
7375 zfs_refcount_create(&arc_mfu
->arcs_esize
[ARC_BUFC_DATA
]);
7376 zfs_refcount_create(&arc_mfu_ghost
->arcs_esize
[ARC_BUFC_METADATA
]);
7377 zfs_refcount_create(&arc_mfu_ghost
->arcs_esize
[ARC_BUFC_DATA
]);
7378 zfs_refcount_create(&arc_l2c_only
->arcs_esize
[ARC_BUFC_METADATA
]);
7379 zfs_refcount_create(&arc_l2c_only
->arcs_esize
[ARC_BUFC_DATA
]);
7380 zfs_refcount_create(&arc_uncached
->arcs_esize
[ARC_BUFC_METADATA
]);
7381 zfs_refcount_create(&arc_uncached
->arcs_esize
[ARC_BUFC_DATA
]);
7383 zfs_refcount_create(&arc_anon
->arcs_size
[ARC_BUFC_DATA
]);
7384 zfs_refcount_create(&arc_anon
->arcs_size
[ARC_BUFC_METADATA
]);
7385 zfs_refcount_create(&arc_mru
->arcs_size
[ARC_BUFC_DATA
]);
7386 zfs_refcount_create(&arc_mru
->arcs_size
[ARC_BUFC_METADATA
]);
7387 zfs_refcount_create(&arc_mru_ghost
->arcs_size
[ARC_BUFC_DATA
]);
7388 zfs_refcount_create(&arc_mru_ghost
->arcs_size
[ARC_BUFC_METADATA
]);
7389 zfs_refcount_create(&arc_mfu
->arcs_size
[ARC_BUFC_DATA
]);
7390 zfs_refcount_create(&arc_mfu
->arcs_size
[ARC_BUFC_METADATA
]);
7391 zfs_refcount_create(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_DATA
]);
7392 zfs_refcount_create(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_METADATA
]);
7393 zfs_refcount_create(&arc_l2c_only
->arcs_size
[ARC_BUFC_DATA
]);
7394 zfs_refcount_create(&arc_l2c_only
->arcs_size
[ARC_BUFC_METADATA
]);
7395 zfs_refcount_create(&arc_uncached
->arcs_size
[ARC_BUFC_DATA
]);
7396 zfs_refcount_create(&arc_uncached
->arcs_size
[ARC_BUFC_METADATA
]);
7398 wmsum_init(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_DATA
], 0);
7399 wmsum_init(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_METADATA
], 0);
7400 wmsum_init(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_DATA
], 0);
7401 wmsum_init(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_METADATA
], 0);
7403 wmsum_init(&arc_sums
.arcstat_hits
, 0);
7404 wmsum_init(&arc_sums
.arcstat_iohits
, 0);
7405 wmsum_init(&arc_sums
.arcstat_misses
, 0);
7406 wmsum_init(&arc_sums
.arcstat_demand_data_hits
, 0);
7407 wmsum_init(&arc_sums
.arcstat_demand_data_iohits
, 0);
7408 wmsum_init(&arc_sums
.arcstat_demand_data_misses
, 0);
7409 wmsum_init(&arc_sums
.arcstat_demand_metadata_hits
, 0);
7410 wmsum_init(&arc_sums
.arcstat_demand_metadata_iohits
, 0);
7411 wmsum_init(&arc_sums
.arcstat_demand_metadata_misses
, 0);
7412 wmsum_init(&arc_sums
.arcstat_prefetch_data_hits
, 0);
7413 wmsum_init(&arc_sums
.arcstat_prefetch_data_iohits
, 0);
7414 wmsum_init(&arc_sums
.arcstat_prefetch_data_misses
, 0);
7415 wmsum_init(&arc_sums
.arcstat_prefetch_metadata_hits
, 0);
7416 wmsum_init(&arc_sums
.arcstat_prefetch_metadata_iohits
, 0);
7417 wmsum_init(&arc_sums
.arcstat_prefetch_metadata_misses
, 0);
7418 wmsum_init(&arc_sums
.arcstat_mru_hits
, 0);
7419 wmsum_init(&arc_sums
.arcstat_mru_ghost_hits
, 0);
7420 wmsum_init(&arc_sums
.arcstat_mfu_hits
, 0);
7421 wmsum_init(&arc_sums
.arcstat_mfu_ghost_hits
, 0);
7422 wmsum_init(&arc_sums
.arcstat_uncached_hits
, 0);
7423 wmsum_init(&arc_sums
.arcstat_deleted
, 0);
7424 wmsum_init(&arc_sums
.arcstat_mutex_miss
, 0);
7425 wmsum_init(&arc_sums
.arcstat_access_skip
, 0);
7426 wmsum_init(&arc_sums
.arcstat_evict_skip
, 0);
7427 wmsum_init(&arc_sums
.arcstat_evict_not_enough
, 0);
7428 wmsum_init(&arc_sums
.arcstat_evict_l2_cached
, 0);
7429 wmsum_init(&arc_sums
.arcstat_evict_l2_eligible
, 0);
7430 wmsum_init(&arc_sums
.arcstat_evict_l2_eligible_mfu
, 0);
7431 wmsum_init(&arc_sums
.arcstat_evict_l2_eligible_mru
, 0);
7432 wmsum_init(&arc_sums
.arcstat_evict_l2_ineligible
, 0);
7433 wmsum_init(&arc_sums
.arcstat_evict_l2_skip
, 0);
7434 wmsum_init(&arc_sums
.arcstat_hash_collisions
, 0);
7435 wmsum_init(&arc_sums
.arcstat_hash_chains
, 0);
7436 aggsum_init(&arc_sums
.arcstat_size
, 0);
7437 wmsum_init(&arc_sums
.arcstat_compressed_size
, 0);
7438 wmsum_init(&arc_sums
.arcstat_uncompressed_size
, 0);
7439 wmsum_init(&arc_sums
.arcstat_overhead_size
, 0);
7440 wmsum_init(&arc_sums
.arcstat_hdr_size
, 0);
7441 wmsum_init(&arc_sums
.arcstat_data_size
, 0);
7442 wmsum_init(&arc_sums
.arcstat_metadata_size
, 0);
7443 wmsum_init(&arc_sums
.arcstat_dbuf_size
, 0);
7444 wmsum_init(&arc_sums
.arcstat_dnode_size
, 0);
7445 wmsum_init(&arc_sums
.arcstat_bonus_size
, 0);
7446 wmsum_init(&arc_sums
.arcstat_l2_hits
, 0);
7447 wmsum_init(&arc_sums
.arcstat_l2_misses
, 0);
7448 wmsum_init(&arc_sums
.arcstat_l2_prefetch_asize
, 0);
7449 wmsum_init(&arc_sums
.arcstat_l2_mru_asize
, 0);
7450 wmsum_init(&arc_sums
.arcstat_l2_mfu_asize
, 0);
7451 wmsum_init(&arc_sums
.arcstat_l2_bufc_data_asize
, 0);
7452 wmsum_init(&arc_sums
.arcstat_l2_bufc_metadata_asize
, 0);
7453 wmsum_init(&arc_sums
.arcstat_l2_feeds
, 0);
7454 wmsum_init(&arc_sums
.arcstat_l2_rw_clash
, 0);
7455 wmsum_init(&arc_sums
.arcstat_l2_read_bytes
, 0);
7456 wmsum_init(&arc_sums
.arcstat_l2_write_bytes
, 0);
7457 wmsum_init(&arc_sums
.arcstat_l2_writes_sent
, 0);
7458 wmsum_init(&arc_sums
.arcstat_l2_writes_done
, 0);
7459 wmsum_init(&arc_sums
.arcstat_l2_writes_error
, 0);
7460 wmsum_init(&arc_sums
.arcstat_l2_writes_lock_retry
, 0);
7461 wmsum_init(&arc_sums
.arcstat_l2_evict_lock_retry
, 0);
7462 wmsum_init(&arc_sums
.arcstat_l2_evict_reading
, 0);
7463 wmsum_init(&arc_sums
.arcstat_l2_evict_l1cached
, 0);
7464 wmsum_init(&arc_sums
.arcstat_l2_free_on_write
, 0);
7465 wmsum_init(&arc_sums
.arcstat_l2_abort_lowmem
, 0);
7466 wmsum_init(&arc_sums
.arcstat_l2_cksum_bad
, 0);
7467 wmsum_init(&arc_sums
.arcstat_l2_io_error
, 0);
7468 wmsum_init(&arc_sums
.arcstat_l2_lsize
, 0);
7469 wmsum_init(&arc_sums
.arcstat_l2_psize
, 0);
7470 aggsum_init(&arc_sums
.arcstat_l2_hdr_size
, 0);
7471 wmsum_init(&arc_sums
.arcstat_l2_log_blk_writes
, 0);
7472 wmsum_init(&arc_sums
.arcstat_l2_log_blk_asize
, 0);
7473 wmsum_init(&arc_sums
.arcstat_l2_log_blk_count
, 0);
7474 wmsum_init(&arc_sums
.arcstat_l2_rebuild_success
, 0);
7475 wmsum_init(&arc_sums
.arcstat_l2_rebuild_abort_unsupported
, 0);
7476 wmsum_init(&arc_sums
.arcstat_l2_rebuild_abort_io_errors
, 0);
7477 wmsum_init(&arc_sums
.arcstat_l2_rebuild_abort_dh_errors
, 0);
7478 wmsum_init(&arc_sums
.arcstat_l2_rebuild_abort_cksum_lb_errors
, 0);
7479 wmsum_init(&arc_sums
.arcstat_l2_rebuild_abort_lowmem
, 0);
7480 wmsum_init(&arc_sums
.arcstat_l2_rebuild_size
, 0);
7481 wmsum_init(&arc_sums
.arcstat_l2_rebuild_asize
, 0);
7482 wmsum_init(&arc_sums
.arcstat_l2_rebuild_bufs
, 0);
7483 wmsum_init(&arc_sums
.arcstat_l2_rebuild_bufs_precached
, 0);
7484 wmsum_init(&arc_sums
.arcstat_l2_rebuild_log_blks
, 0);
7485 wmsum_init(&arc_sums
.arcstat_memory_throttle_count
, 0);
7486 wmsum_init(&arc_sums
.arcstat_memory_direct_count
, 0);
7487 wmsum_init(&arc_sums
.arcstat_memory_indirect_count
, 0);
7488 wmsum_init(&arc_sums
.arcstat_prune
, 0);
7489 wmsum_init(&arc_sums
.arcstat_meta_used
, 0);
7490 wmsum_init(&arc_sums
.arcstat_async_upgrade_sync
, 0);
7491 wmsum_init(&arc_sums
.arcstat_predictive_prefetch
, 0);
7492 wmsum_init(&arc_sums
.arcstat_demand_hit_predictive_prefetch
, 0);
7493 wmsum_init(&arc_sums
.arcstat_demand_iohit_predictive_prefetch
, 0);
7494 wmsum_init(&arc_sums
.arcstat_prescient_prefetch
, 0);
7495 wmsum_init(&arc_sums
.arcstat_demand_hit_prescient_prefetch
, 0);
7496 wmsum_init(&arc_sums
.arcstat_demand_iohit_prescient_prefetch
, 0);
7497 wmsum_init(&arc_sums
.arcstat_raw_size
, 0);
7498 wmsum_init(&arc_sums
.arcstat_cached_only_in_progress
, 0);
7499 wmsum_init(&arc_sums
.arcstat_abd_chunk_waste_size
, 0);
7501 arc_anon
->arcs_state
= ARC_STATE_ANON
;
7502 arc_mru
->arcs_state
= ARC_STATE_MRU
;
7503 arc_mru_ghost
->arcs_state
= ARC_STATE_MRU_GHOST
;
7504 arc_mfu
->arcs_state
= ARC_STATE_MFU
;
7505 arc_mfu_ghost
->arcs_state
= ARC_STATE_MFU_GHOST
;
7506 arc_l2c_only
->arcs_state
= ARC_STATE_L2C_ONLY
;
7507 arc_uncached
->arcs_state
= ARC_STATE_UNCACHED
;
7511 arc_state_fini(void)
7513 zfs_refcount_destroy(&arc_anon
->arcs_esize
[ARC_BUFC_METADATA
]);
7514 zfs_refcount_destroy(&arc_anon
->arcs_esize
[ARC_BUFC_DATA
]);
7515 zfs_refcount_destroy(&arc_mru
->arcs_esize
[ARC_BUFC_METADATA
]);
7516 zfs_refcount_destroy(&arc_mru
->arcs_esize
[ARC_BUFC_DATA
]);
7517 zfs_refcount_destroy(&arc_mru_ghost
->arcs_esize
[ARC_BUFC_METADATA
]);
7518 zfs_refcount_destroy(&arc_mru_ghost
->arcs_esize
[ARC_BUFC_DATA
]);
7519 zfs_refcount_destroy(&arc_mfu
->arcs_esize
[ARC_BUFC_METADATA
]);
7520 zfs_refcount_destroy(&arc_mfu
->arcs_esize
[ARC_BUFC_DATA
]);
7521 zfs_refcount_destroy(&arc_mfu_ghost
->arcs_esize
[ARC_BUFC_METADATA
]);
7522 zfs_refcount_destroy(&arc_mfu_ghost
->arcs_esize
[ARC_BUFC_DATA
]);
7523 zfs_refcount_destroy(&arc_l2c_only
->arcs_esize
[ARC_BUFC_METADATA
]);
7524 zfs_refcount_destroy(&arc_l2c_only
->arcs_esize
[ARC_BUFC_DATA
]);
7525 zfs_refcount_destroy(&arc_uncached
->arcs_esize
[ARC_BUFC_METADATA
]);
7526 zfs_refcount_destroy(&arc_uncached
->arcs_esize
[ARC_BUFC_DATA
]);
7528 zfs_refcount_destroy(&arc_anon
->arcs_size
[ARC_BUFC_DATA
]);
7529 zfs_refcount_destroy(&arc_anon
->arcs_size
[ARC_BUFC_METADATA
]);
7530 zfs_refcount_destroy(&arc_mru
->arcs_size
[ARC_BUFC_DATA
]);
7531 zfs_refcount_destroy(&arc_mru
->arcs_size
[ARC_BUFC_METADATA
]);
7532 zfs_refcount_destroy(&arc_mru_ghost
->arcs_size
[ARC_BUFC_DATA
]);
7533 zfs_refcount_destroy(&arc_mru_ghost
->arcs_size
[ARC_BUFC_METADATA
]);
7534 zfs_refcount_destroy(&arc_mfu
->arcs_size
[ARC_BUFC_DATA
]);
7535 zfs_refcount_destroy(&arc_mfu
->arcs_size
[ARC_BUFC_METADATA
]);
7536 zfs_refcount_destroy(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_DATA
]);
7537 zfs_refcount_destroy(&arc_mfu_ghost
->arcs_size
[ARC_BUFC_METADATA
]);
7538 zfs_refcount_destroy(&arc_l2c_only
->arcs_size
[ARC_BUFC_DATA
]);
7539 zfs_refcount_destroy(&arc_l2c_only
->arcs_size
[ARC_BUFC_METADATA
]);
7540 zfs_refcount_destroy(&arc_uncached
->arcs_size
[ARC_BUFC_DATA
]);
7541 zfs_refcount_destroy(&arc_uncached
->arcs_size
[ARC_BUFC_METADATA
]);
7543 multilist_destroy(&arc_mru
->arcs_list
[ARC_BUFC_METADATA
]);
7544 multilist_destroy(&arc_mru_ghost
->arcs_list
[ARC_BUFC_METADATA
]);
7545 multilist_destroy(&arc_mfu
->arcs_list
[ARC_BUFC_METADATA
]);
7546 multilist_destroy(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_METADATA
]);
7547 multilist_destroy(&arc_mru
->arcs_list
[ARC_BUFC_DATA
]);
7548 multilist_destroy(&arc_mru_ghost
->arcs_list
[ARC_BUFC_DATA
]);
7549 multilist_destroy(&arc_mfu
->arcs_list
[ARC_BUFC_DATA
]);
7550 multilist_destroy(&arc_mfu_ghost
->arcs_list
[ARC_BUFC_DATA
]);
7551 multilist_destroy(&arc_l2c_only
->arcs_list
[ARC_BUFC_METADATA
]);
7552 multilist_destroy(&arc_l2c_only
->arcs_list
[ARC_BUFC_DATA
]);
7553 multilist_destroy(&arc_uncached
->arcs_list
[ARC_BUFC_METADATA
]);
7554 multilist_destroy(&arc_uncached
->arcs_list
[ARC_BUFC_DATA
]);
7556 wmsum_fini(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_DATA
]);
7557 wmsum_fini(&arc_mru_ghost
->arcs_hits
[ARC_BUFC_METADATA
]);
7558 wmsum_fini(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_DATA
]);
7559 wmsum_fini(&arc_mfu_ghost
->arcs_hits
[ARC_BUFC_METADATA
]);
7561 wmsum_fini(&arc_sums
.arcstat_hits
);
7562 wmsum_fini(&arc_sums
.arcstat_iohits
);
7563 wmsum_fini(&arc_sums
.arcstat_misses
);
7564 wmsum_fini(&arc_sums
.arcstat_demand_data_hits
);
7565 wmsum_fini(&arc_sums
.arcstat_demand_data_iohits
);
7566 wmsum_fini(&arc_sums
.arcstat_demand_data_misses
);
7567 wmsum_fini(&arc_sums
.arcstat_demand_metadata_hits
);
7568 wmsum_fini(&arc_sums
.arcstat_demand_metadata_iohits
);
7569 wmsum_fini(&arc_sums
.arcstat_demand_metadata_misses
);
7570 wmsum_fini(&arc_sums
.arcstat_prefetch_data_hits
);
7571 wmsum_fini(&arc_sums
.arcstat_prefetch_data_iohits
);
7572 wmsum_fini(&arc_sums
.arcstat_prefetch_data_misses
);
7573 wmsum_fini(&arc_sums
.arcstat_prefetch_metadata_hits
);
7574 wmsum_fini(&arc_sums
.arcstat_prefetch_metadata_iohits
);
7575 wmsum_fini(&arc_sums
.arcstat_prefetch_metadata_misses
);
7576 wmsum_fini(&arc_sums
.arcstat_mru_hits
);
7577 wmsum_fini(&arc_sums
.arcstat_mru_ghost_hits
);
7578 wmsum_fini(&arc_sums
.arcstat_mfu_hits
);
7579 wmsum_fini(&arc_sums
.arcstat_mfu_ghost_hits
);
7580 wmsum_fini(&arc_sums
.arcstat_uncached_hits
);
7581 wmsum_fini(&arc_sums
.arcstat_deleted
);
7582 wmsum_fini(&arc_sums
.arcstat_mutex_miss
);
7583 wmsum_fini(&arc_sums
.arcstat_access_skip
);
7584 wmsum_fini(&arc_sums
.arcstat_evict_skip
);
7585 wmsum_fini(&arc_sums
.arcstat_evict_not_enough
);
7586 wmsum_fini(&arc_sums
.arcstat_evict_l2_cached
);
7587 wmsum_fini(&arc_sums
.arcstat_evict_l2_eligible
);
7588 wmsum_fini(&arc_sums
.arcstat_evict_l2_eligible_mfu
);
7589 wmsum_fini(&arc_sums
.arcstat_evict_l2_eligible_mru
);
7590 wmsum_fini(&arc_sums
.arcstat_evict_l2_ineligible
);
7591 wmsum_fini(&arc_sums
.arcstat_evict_l2_skip
);
7592 wmsum_fini(&arc_sums
.arcstat_hash_collisions
);
7593 wmsum_fini(&arc_sums
.arcstat_hash_chains
);
7594 aggsum_fini(&arc_sums
.arcstat_size
);
7595 wmsum_fini(&arc_sums
.arcstat_compressed_size
);
7596 wmsum_fini(&arc_sums
.arcstat_uncompressed_size
);
7597 wmsum_fini(&arc_sums
.arcstat_overhead_size
);
7598 wmsum_fini(&arc_sums
.arcstat_hdr_size
);
7599 wmsum_fini(&arc_sums
.arcstat_data_size
);
7600 wmsum_fini(&arc_sums
.arcstat_metadata_size
);
7601 wmsum_fini(&arc_sums
.arcstat_dbuf_size
);
7602 wmsum_fini(&arc_sums
.arcstat_dnode_size
);
7603 wmsum_fini(&arc_sums
.arcstat_bonus_size
);
7604 wmsum_fini(&arc_sums
.arcstat_l2_hits
);
7605 wmsum_fini(&arc_sums
.arcstat_l2_misses
);
7606 wmsum_fini(&arc_sums
.arcstat_l2_prefetch_asize
);
7607 wmsum_fini(&arc_sums
.arcstat_l2_mru_asize
);
7608 wmsum_fini(&arc_sums
.arcstat_l2_mfu_asize
);
7609 wmsum_fini(&arc_sums
.arcstat_l2_bufc_data_asize
);
7610 wmsum_fini(&arc_sums
.arcstat_l2_bufc_metadata_asize
);
7611 wmsum_fini(&arc_sums
.arcstat_l2_feeds
);
7612 wmsum_fini(&arc_sums
.arcstat_l2_rw_clash
);
7613 wmsum_fini(&arc_sums
.arcstat_l2_read_bytes
);
7614 wmsum_fini(&arc_sums
.arcstat_l2_write_bytes
);
7615 wmsum_fini(&arc_sums
.arcstat_l2_writes_sent
);
7616 wmsum_fini(&arc_sums
.arcstat_l2_writes_done
);
7617 wmsum_fini(&arc_sums
.arcstat_l2_writes_error
);
7618 wmsum_fini(&arc_sums
.arcstat_l2_writes_lock_retry
);
7619 wmsum_fini(&arc_sums
.arcstat_l2_evict_lock_retry
);
7620 wmsum_fini(&arc_sums
.arcstat_l2_evict_reading
);
7621 wmsum_fini(&arc_sums
.arcstat_l2_evict_l1cached
);
7622 wmsum_fini(&arc_sums
.arcstat_l2_free_on_write
);
7623 wmsum_fini(&arc_sums
.arcstat_l2_abort_lowmem
);
7624 wmsum_fini(&arc_sums
.arcstat_l2_cksum_bad
);
7625 wmsum_fini(&arc_sums
.arcstat_l2_io_error
);
7626 wmsum_fini(&arc_sums
.arcstat_l2_lsize
);
7627 wmsum_fini(&arc_sums
.arcstat_l2_psize
);
7628 aggsum_fini(&arc_sums
.arcstat_l2_hdr_size
);
7629 wmsum_fini(&arc_sums
.arcstat_l2_log_blk_writes
);
7630 wmsum_fini(&arc_sums
.arcstat_l2_log_blk_asize
);
7631 wmsum_fini(&arc_sums
.arcstat_l2_log_blk_count
);
7632 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_success
);
7633 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_abort_unsupported
);
7634 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_abort_io_errors
);
7635 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_abort_dh_errors
);
7636 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_abort_cksum_lb_errors
);
7637 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_abort_lowmem
);
7638 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_size
);
7639 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_asize
);
7640 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_bufs
);
7641 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_bufs_precached
);
7642 wmsum_fini(&arc_sums
.arcstat_l2_rebuild_log_blks
);
7643 wmsum_fini(&arc_sums
.arcstat_memory_throttle_count
);
7644 wmsum_fini(&arc_sums
.arcstat_memory_direct_count
);
7645 wmsum_fini(&arc_sums
.arcstat_memory_indirect_count
);
7646 wmsum_fini(&arc_sums
.arcstat_prune
);
7647 wmsum_fini(&arc_sums
.arcstat_meta_used
);
7648 wmsum_fini(&arc_sums
.arcstat_async_upgrade_sync
);
7649 wmsum_fini(&arc_sums
.arcstat_predictive_prefetch
);
7650 wmsum_fini(&arc_sums
.arcstat_demand_hit_predictive_prefetch
);
7651 wmsum_fini(&arc_sums
.arcstat_demand_iohit_predictive_prefetch
);
7652 wmsum_fini(&arc_sums
.arcstat_prescient_prefetch
);
7653 wmsum_fini(&arc_sums
.arcstat_demand_hit_prescient_prefetch
);
7654 wmsum_fini(&arc_sums
.arcstat_demand_iohit_prescient_prefetch
);
7655 wmsum_fini(&arc_sums
.arcstat_raw_size
);
7656 wmsum_fini(&arc_sums
.arcstat_cached_only_in_progress
);
7657 wmsum_fini(&arc_sums
.arcstat_abd_chunk_waste_size
);
7661 arc_target_bytes(void)
7667 arc_set_limits(uint64_t allmem
)
7669 /* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
7670 arc_c_min
= MAX(allmem
/ 32, 2ULL << SPA_MAXBLOCKSHIFT
);
7672 /* How to set default max varies by platform. */
7673 arc_c_max
= arc_default_max(arc_c_min
, allmem
);
7678 uint64_t percent
, allmem
= arc_all_memory();
7679 mutex_init(&arc_evict_lock
, NULL
, MUTEX_DEFAULT
, NULL
);
7680 list_create(&arc_evict_waiters
, sizeof (arc_evict_waiter_t
),
7681 offsetof(arc_evict_waiter_t
, aew_node
));
7683 arc_min_prefetch_ms
= 1000;
7684 arc_min_prescient_prefetch_ms
= 6000;
7686 #if defined(_KERNEL)
7690 arc_set_limits(allmem
);
7694 * If zfs_arc_max is non-zero at init, meaning it was set in the kernel
7695 * environment before the module was loaded, don't block setting the
7696 * maximum because it is less than arc_c_min, instead, reset arc_c_min
7698 * zfs_arc_min will be handled by arc_tuning_update().
7700 if (zfs_arc_max
!= 0 && zfs_arc_max
>= MIN_ARC_MAX
&&
7701 zfs_arc_max
< allmem
) {
7702 arc_c_max
= zfs_arc_max
;
7703 if (arc_c_min
>= arc_c_max
) {
7704 arc_c_min
= MAX(zfs_arc_max
/ 2,
7705 2ULL << SPA_MAXBLOCKSHIFT
);
7710 * In userland, there's only the memory pressure that we artificially
7711 * create (see arc_available_memory()). Don't let arc_c get too
7712 * small, because it can cause transactions to be larger than
7713 * arc_c, causing arc_tempreserve_space() to fail.
7715 arc_c_min
= MAX(arc_c_max
/ 2, 2ULL << SPA_MAXBLOCKSHIFT
);
7720 * 32-bit fixed point fractions of metadata from total ARC size,
7721 * MRU data from all data and MRU metadata from all metadata.
7723 arc_meta
= (1ULL << 32) / 4; /* Metadata is 25% of arc_c. */
7724 arc_pd
= (1ULL << 32) / 2; /* Data MRU is 50% of data. */
7725 arc_pm
= (1ULL << 32) / 2; /* Metadata MRU is 50% of metadata. */
7727 percent
= MIN(zfs_arc_dnode_limit_percent
, 100);
7728 arc_dnode_limit
= arc_c_max
* percent
/ 100;
7730 /* Apply user specified tunings */
7731 arc_tuning_update(B_TRUE
);
7733 /* if kmem_flags are set, lets try to use less memory */
7734 if (kmem_debugging())
7736 if (arc_c
< arc_c_min
)
7739 arc_register_hotplug();
7745 list_create(&arc_prune_list
, sizeof (arc_prune_t
),
7746 offsetof(arc_prune_t
, p_node
));
7747 mutex_init(&arc_prune_mtx
, NULL
, MUTEX_DEFAULT
, NULL
);
7749 arc_prune_taskq
= taskq_create("arc_prune", zfs_arc_prune_task_threads
,
7750 defclsyspri
, 100, INT_MAX
, TASKQ_PREPOPULATE
| TASKQ_DYNAMIC
);
7752 arc_ksp
= kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED
,
7753 sizeof (arc_stats
) / sizeof (kstat_named_t
), KSTAT_FLAG_VIRTUAL
);
7755 if (arc_ksp
!= NULL
) {
7756 arc_ksp
->ks_data
= &arc_stats
;
7757 arc_ksp
->ks_update
= arc_kstat_update
;
7758 kstat_install(arc_ksp
);
7761 arc_state_evict_markers
=
7762 arc_state_alloc_markers(arc_state_evict_marker_count
);
7763 arc_evict_zthr
= zthr_create_timer("arc_evict",
7764 arc_evict_cb_check
, arc_evict_cb
, NULL
, SEC2NSEC(1), defclsyspri
);
7765 arc_reap_zthr
= zthr_create_timer("arc_reap",
7766 arc_reap_cb_check
, arc_reap_cb
, NULL
, SEC2NSEC(1), minclsyspri
);
7771 * Calculate maximum amount of dirty data per pool.
7773 * If it has been set by a module parameter, take that.
7774 * Otherwise, use a percentage of physical memory defined by
7775 * zfs_dirty_data_max_percent (default 10%) with a cap at
7776 * zfs_dirty_data_max_max (default 4G or 25% of physical memory).
7779 if (zfs_dirty_data_max_max
== 0)
7780 zfs_dirty_data_max_max
= MIN(4ULL * 1024 * 1024 * 1024,
7781 allmem
* zfs_dirty_data_max_max_percent
/ 100);
7783 if (zfs_dirty_data_max_max
== 0)
7784 zfs_dirty_data_max_max
= MIN(1ULL * 1024 * 1024 * 1024,
7785 allmem
* zfs_dirty_data_max_max_percent
/ 100);
7788 if (zfs_dirty_data_max
== 0) {
7789 zfs_dirty_data_max
= allmem
*
7790 zfs_dirty_data_max_percent
/ 100;
7791 zfs_dirty_data_max
= MIN(zfs_dirty_data_max
,
7792 zfs_dirty_data_max_max
);
7795 if (zfs_wrlog_data_max
== 0) {
7798 * dp_wrlog_total is reduced for each txg at the end of
7799 * spa_sync(). However, dp_dirty_total is reduced every time
7800 * a block is written out. Thus under normal operation,
7801 * dp_wrlog_total could grow 2 times as big as
7802 * zfs_dirty_data_max.
7804 zfs_wrlog_data_max
= zfs_dirty_data_max
* 2;
7815 #endif /* _KERNEL */
7817 /* Use B_TRUE to ensure *all* buffers are evicted */
7818 arc_flush(NULL
, B_TRUE
);
7820 if (arc_ksp
!= NULL
) {
7821 kstat_delete(arc_ksp
);
7825 taskq_wait(arc_prune_taskq
);
7826 taskq_destroy(arc_prune_taskq
);
7828 mutex_enter(&arc_prune_mtx
);
7829 while ((p
= list_remove_head(&arc_prune_list
)) != NULL
) {
7830 zfs_refcount_remove(&p
->p_refcnt
, &arc_prune_list
);
7831 zfs_refcount_destroy(&p
->p_refcnt
);
7832 kmem_free(p
, sizeof (*p
));
7834 mutex_exit(&arc_prune_mtx
);
7836 list_destroy(&arc_prune_list
);
7837 mutex_destroy(&arc_prune_mtx
);
7839 (void) zthr_cancel(arc_evict_zthr
);
7840 (void) zthr_cancel(arc_reap_zthr
);
7841 arc_state_free_markers(arc_state_evict_markers
,
7842 arc_state_evict_marker_count
);
7844 mutex_destroy(&arc_evict_lock
);
7845 list_destroy(&arc_evict_waiters
);
7848 * Free any buffers that were tagged for destruction. This needs
7849 * to occur before arc_state_fini() runs and destroys the aggsum
7850 * values which are updated when freeing scatter ABDs.
7852 l2arc_do_free_on_write();
7855 * buf_fini() must proceed arc_state_fini() because buf_fin() may
7856 * trigger the release of kmem magazines, which can callback to
7857 * arc_space_return() which accesses aggsums freed in act_state_fini().
7862 arc_unregister_hotplug();
7865 * We destroy the zthrs after all the ARC state has been
7866 * torn down to avoid the case of them receiving any
7867 * wakeup() signals after they are destroyed.
7869 zthr_destroy(arc_evict_zthr
);
7870 zthr_destroy(arc_reap_zthr
);
7872 ASSERT0(arc_loaned_bytes
);
7878 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
7879 * It uses dedicated storage devices to hold cached data, which are populated
7880 * using large infrequent writes. The main role of this cache is to boost
7881 * the performance of random read workloads. The intended L2ARC devices
7882 * include short-stroked disks, solid state disks, and other media with
7883 * substantially faster read latency than disk.
7885 * +-----------------------+
7887 * +-----------------------+
7890 * l2arc_feed_thread() arc_read()
7894 * +---------------+ |
7896 * +---------------+ |
7901 * +-------+ +-------+
7903 * | cache | | cache |
7904 * +-------+ +-------+
7905 * +=========+ .-----.
7906 * : L2ARC : |-_____-|
7907 * : devices : | Disks |
7908 * +=========+ `-_____-'
7910 * Read requests are satisfied from the following sources, in order:
7913 * 2) vdev cache of L2ARC devices
7915 * 4) vdev cache of disks
7918 * Some L2ARC device types exhibit extremely slow write performance.
7919 * To accommodate for this there are some significant differences between
7920 * the L2ARC and traditional cache design:
7922 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
7923 * the ARC behave as usual, freeing buffers and placing headers on ghost
7924 * lists. The ARC does not send buffers to the L2ARC during eviction as
7925 * this would add inflated write latencies for all ARC memory pressure.
7927 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
7928 * It does this by periodically scanning buffers from the eviction-end of
7929 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
7930 * not already there. It scans until a headroom of buffers is satisfied,
7931 * which itself is a buffer for ARC eviction. If a compressible buffer is
7932 * found during scanning and selected for writing to an L2ARC device, we
7933 * temporarily boost scanning headroom during the next scan cycle to make
7934 * sure we adapt to compression effects (which might significantly reduce
7935 * the data volume we write to L2ARC). The thread that does this is
7936 * l2arc_feed_thread(), illustrated below; example sizes are included to
7937 * provide a better sense of ratio than this diagram:
7940 * +---------------------+----------+
7941 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
7942 * +---------------------+----------+ | o L2ARC eligible
7943 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
7944 * +---------------------+----------+ |
7945 * 15.9 Gbytes ^ 32 Mbytes |
7947 * l2arc_feed_thread()
7949 * l2arc write hand <--[oooo]--'
7953 * +==============================+
7954 * L2ARC dev |####|#|###|###| |####| ... |
7955 * +==============================+
7958 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
7959 * evicted, then the L2ARC has cached a buffer much sooner than it probably
7960 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
7961 * safe to say that this is an uncommon case, since buffers at the end of
7962 * the ARC lists have moved there due to inactivity.
7964 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
7965 * then the L2ARC simply misses copying some buffers. This serves as a
7966 * pressure valve to prevent heavy read workloads from both stalling the ARC
7967 * with waits and clogging the L2ARC with writes. This also helps prevent
7968 * the potential for the L2ARC to churn if it attempts to cache content too
7969 * quickly, such as during backups of the entire pool.
7971 * 5. After system boot and before the ARC has filled main memory, there are
7972 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
7973 * lists can remain mostly static. Instead of searching from tail of these
7974 * lists as pictured, the l2arc_feed_thread() will search from the list heads
7975 * for eligible buffers, greatly increasing its chance of finding them.
7977 * The L2ARC device write speed is also boosted during this time so that
7978 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
7979 * there are no L2ARC reads, and no fear of degrading read performance
7980 * through increased writes.
7982 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
7983 * the vdev queue can aggregate them into larger and fewer writes. Each
7984 * device is written to in a rotor fashion, sweeping writes through
7985 * available space then repeating.
7987 * 7. The L2ARC does not store dirty content. It never needs to flush
7988 * write buffers back to disk based storage.
7990 * 8. If an ARC buffer is written (and dirtied) which also exists in the
7991 * L2ARC, the now stale L2ARC buffer is immediately dropped.
7993 * The performance of the L2ARC can be tweaked by a number of tunables, which
7994 * may be necessary for different workloads:
7996 * l2arc_write_max max write bytes per interval
7997 * l2arc_write_boost extra write bytes during device warmup
7998 * l2arc_noprefetch skip caching prefetched buffers
7999 * l2arc_headroom number of max device writes to precache
8000 * l2arc_headroom_boost when we find compressed buffers during ARC
8001 * scanning, we multiply headroom by this
8002 * percentage factor for the next scan cycle,
8003 * since more compressed buffers are likely to
8005 * l2arc_feed_secs seconds between L2ARC writing
8007 * Tunables may be removed or added as future performance improvements are
8008 * integrated, and also may become zpool properties.
8010 * There are three key functions that control how the L2ARC warms up:
8012 * l2arc_write_eligible() check if a buffer is eligible to cache
8013 * l2arc_write_size() calculate how much to write
8014 * l2arc_write_interval() calculate sleep delay between writes
8016 * These three functions determine what to write, how much, and how quickly
8019 * L2ARC persistence:
8021 * When writing buffers to L2ARC, we periodically add some metadata to
8022 * make sure we can pick them up after reboot, thus dramatically reducing
8023 * the impact that any downtime has on the performance of storage systems
8024 * with large caches.
8026 * The implementation works fairly simply by integrating the following two
8029 * *) When writing to the L2ARC, we occasionally write a "l2arc log block",
8030 * which is an additional piece of metadata which describes what's been
8031 * written. This allows us to rebuild the arc_buf_hdr_t structures of the
8032 * main ARC buffers. There are 2 linked-lists of log blocks headed by
8033 * dh_start_lbps[2]. We alternate which chain we append to, so they are
8034 * time-wise and offset-wise interleaved, but that is an optimization rather
8035 * than for correctness. The log block also includes a pointer to the
8036 * previous block in its chain.
8038 * *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
8039 * for our header bookkeeping purposes. This contains a device header,
8040 * which contains our top-level reference structures. We update it each
8041 * time we write a new log block, so that we're able to locate it in the
8042 * L2ARC device. If this write results in an inconsistent device header
8043 * (e.g. due to power failure), we detect this by verifying the header's
8044 * checksum and simply fail to reconstruct the L2ARC after reboot.
8046 * Implementation diagram:
8048 * +=== L2ARC device (not to scale) ======================================+
8049 * | ___two newest log block pointers__.__________ |
8050 * | / \dh_start_lbps[1] |
8051 * | / \ \dh_start_lbps[0]|
8053 * ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
8054 * || hdr| ^ /^ /^ / / |
8055 * |+------+ ...--\-------/ \-----/--\------/ / |
8056 * | \--------------/ \--------------/ |
8057 * +======================================================================+
8059 * As can be seen on the diagram, rather than using a simple linked list,
8060 * we use a pair of linked lists with alternating elements. This is a
8061 * performance enhancement due to the fact that we only find out the
8062 * address of the next log block access once the current block has been
8063 * completely read in. Obviously, this hurts performance, because we'd be
8064 * keeping the device's I/O queue at only a 1 operation deep, thus
8065 * incurring a large amount of I/O round-trip latency. Having two lists
8066 * allows us to fetch two log blocks ahead of where we are currently
8067 * rebuilding L2ARC buffers.
8069 * On-device data structures:
8071 * L2ARC device header: l2arc_dev_hdr_phys_t
8072 * L2ARC log block: l2arc_log_blk_phys_t
8074 * L2ARC reconstruction:
8076 * When writing data, we simply write in the standard rotary fashion,
8077 * evicting buffers as we go and simply writing new data over them (writing
8078 * a new log block every now and then). This obviously means that once we
8079 * loop around the end of the device, we will start cutting into an already
8080 * committed log block (and its referenced data buffers), like so:
8082 * current write head__ __old tail
8085 * <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
8086 * ^ ^^^^^^^^^___________________________________
8088 * <<nextwrite>> may overwrite this blk and/or its bufs --'
8090 * When importing the pool, we detect this situation and use it to stop
8091 * our scanning process (see l2arc_rebuild).
8093 * There is one significant caveat to consider when rebuilding ARC contents
8094 * from an L2ARC device: what about invalidated buffers? Given the above
8095 * construction, we cannot update blocks which we've already written to amend
8096 * them to remove buffers which were invalidated. Thus, during reconstruction,
8097 * we might be populating the cache with buffers for data that's not on the
8098 * main pool anymore, or may have been overwritten!
8100 * As it turns out, this isn't a problem. Every arc_read request includes
8101 * both the DVA and, crucially, the birth TXG of the BP the caller is
8102 * looking for. So even if the cache were populated by completely rotten
8103 * blocks for data that had been long deleted and/or overwritten, we'll
8104 * never actually return bad data from the cache, since the DVA with the
8105 * birth TXG uniquely identify a block in space and time - once created,
8106 * a block is immutable on disk. The worst thing we have done is wasted
8107 * some time and memory at l2arc rebuild to reconstruct outdated ARC
8108 * entries that will get dropped from the l2arc as it is being updated
8111 * L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
8112 * hand are not restored. This is done by saving the offset (in bytes)
8113 * l2arc_evict() has evicted to in the L2ARC device header and taking it
8114 * into account when restoring buffers.
8118 l2arc_write_eligible(uint64_t spa_guid
, arc_buf_hdr_t
*hdr
)
8121 * A buffer is *not* eligible for the L2ARC if it:
8122 * 1. belongs to a different spa.
8123 * 2. is already cached on the L2ARC.
8124 * 3. has an I/O in progress (it may be an incomplete read).
8125 * 4. is flagged not eligible (zfs property).
8127 if (hdr
->b_spa
!= spa_guid
|| HDR_HAS_L2HDR(hdr
) ||
8128 HDR_IO_IN_PROGRESS(hdr
) || !HDR_L2CACHE(hdr
))
8135 l2arc_write_size(l2arc_dev_t
*dev
)
8140 * Make sure our globals have meaningful values in case the user
8143 size
= l2arc_write_max
;
8145 cmn_err(CE_NOTE
, "Bad value for l2arc_write_max, value must "
8146 "be greater than zero, resetting it to the default (%d)",
8148 size
= l2arc_write_max
= L2ARC_WRITE_SIZE
;
8151 if (arc_warm
== B_FALSE
)
8152 size
+= l2arc_write_boost
;
8154 /* We need to add in the worst case scenario of log block overhead. */
8155 size
+= l2arc_log_blk_overhead(size
, dev
);
8156 if (dev
->l2ad_vdev
->vdev_has_trim
&& l2arc_trim_ahead
> 0) {
8158 * Trim ahead of the write size 64MB or (l2arc_trim_ahead/100)
8159 * times the writesize, whichever is greater.
8161 size
+= MAX(64 * 1024 * 1024,
8162 (size
* l2arc_trim_ahead
) / 100);
8166 * Make sure the write size does not exceed the size of the cache
8167 * device. This is important in l2arc_evict(), otherwise infinite
8168 * iteration can occur.
8170 if (size
> dev
->l2ad_end
- dev
->l2ad_start
) {
8171 cmn_err(CE_NOTE
, "l2arc_write_max or l2arc_write_boost "
8172 "plus the overhead of log blocks (persistent L2ARC, "
8173 "%llu bytes) exceeds the size of the cache device "
8174 "(guid %llu), resetting them to the default (%d)",
8175 (u_longlong_t
)l2arc_log_blk_overhead(size
, dev
),
8176 (u_longlong_t
)dev
->l2ad_vdev
->vdev_guid
, L2ARC_WRITE_SIZE
);
8178 size
= l2arc_write_max
= l2arc_write_boost
= L2ARC_WRITE_SIZE
;
8180 if (l2arc_trim_ahead
> 1) {
8181 cmn_err(CE_NOTE
, "l2arc_trim_ahead set to 1");
8182 l2arc_trim_ahead
= 1;
8185 if (arc_warm
== B_FALSE
)
8186 size
+= l2arc_write_boost
;
8188 size
+= l2arc_log_blk_overhead(size
, dev
);
8189 if (dev
->l2ad_vdev
->vdev_has_trim
&& l2arc_trim_ahead
> 0) {
8190 size
+= MAX(64 * 1024 * 1024,
8191 (size
* l2arc_trim_ahead
) / 100);
8200 l2arc_write_interval(clock_t began
, uint64_t wanted
, uint64_t wrote
)
8202 clock_t interval
, next
, now
;
8205 * If the ARC lists are busy, increase our write rate; if the
8206 * lists are stale, idle back. This is achieved by checking
8207 * how much we previously wrote - if it was more than half of
8208 * what we wanted, schedule the next write much sooner.
8210 if (l2arc_feed_again
&& wrote
> (wanted
/ 2))
8211 interval
= (hz
* l2arc_feed_min_ms
) / 1000;
8213 interval
= hz
* l2arc_feed_secs
;
8215 now
= ddi_get_lbolt();
8216 next
= MAX(now
, MIN(now
+ interval
, began
+ interval
));
8222 * Cycle through L2ARC devices. This is how L2ARC load balances.
8223 * If a device is returned, this also returns holding the spa config lock.
8225 static l2arc_dev_t
*
8226 l2arc_dev_get_next(void)
8228 l2arc_dev_t
*first
, *next
= NULL
;
8231 * Lock out the removal of spas (spa_namespace_lock), then removal
8232 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
8233 * both locks will be dropped and a spa config lock held instead.
8235 mutex_enter(&spa_namespace_lock
);
8236 mutex_enter(&l2arc_dev_mtx
);
8238 /* if there are no vdevs, there is nothing to do */
8239 if (l2arc_ndev
== 0)
8243 next
= l2arc_dev_last
;
8245 /* loop around the list looking for a non-faulted vdev */
8247 next
= list_head(l2arc_dev_list
);
8249 next
= list_next(l2arc_dev_list
, next
);
8251 next
= list_head(l2arc_dev_list
);
8254 /* if we have come back to the start, bail out */
8257 else if (next
== first
)
8260 ASSERT3P(next
, !=, NULL
);
8261 } while (vdev_is_dead(next
->l2ad_vdev
) || next
->l2ad_rebuild
||
8262 next
->l2ad_trim_all
);
8264 /* if we were unable to find any usable vdevs, return NULL */
8265 if (vdev_is_dead(next
->l2ad_vdev
) || next
->l2ad_rebuild
||
8266 next
->l2ad_trim_all
)
8269 l2arc_dev_last
= next
;
8272 mutex_exit(&l2arc_dev_mtx
);
8275 * Grab the config lock to prevent the 'next' device from being
8276 * removed while we are writing to it.
8279 spa_config_enter(next
->l2ad_spa
, SCL_L2ARC
, next
, RW_READER
);
8280 mutex_exit(&spa_namespace_lock
);
8286 * Free buffers that were tagged for destruction.
8289 l2arc_do_free_on_write(void)
8291 l2arc_data_free_t
*df
;
8293 mutex_enter(&l2arc_free_on_write_mtx
);
8294 while ((df
= list_remove_head(l2arc_free_on_write
)) != NULL
) {
8295 ASSERT3P(df
->l2df_abd
, !=, NULL
);
8296 abd_free(df
->l2df_abd
);
8297 kmem_free(df
, sizeof (l2arc_data_free_t
));
8299 mutex_exit(&l2arc_free_on_write_mtx
);
8303 * A write to a cache device has completed. Update all headers to allow
8304 * reads from these buffers to begin.
8307 l2arc_write_done(zio_t
*zio
)
8309 l2arc_write_callback_t
*cb
;
8310 l2arc_lb_abd_buf_t
*abd_buf
;
8311 l2arc_lb_ptr_buf_t
*lb_ptr_buf
;
8313 l2arc_dev_hdr_phys_t
*l2dhdr
;
8315 arc_buf_hdr_t
*head
, *hdr
, *hdr_prev
;
8316 kmutex_t
*hash_lock
;
8317 int64_t bytes_dropped
= 0;
8319 cb
= zio
->io_private
;
8320 ASSERT3P(cb
, !=, NULL
);
8321 dev
= cb
->l2wcb_dev
;
8322 l2dhdr
= dev
->l2ad_dev_hdr
;
8323 ASSERT3P(dev
, !=, NULL
);
8324 head
= cb
->l2wcb_head
;
8325 ASSERT3P(head
, !=, NULL
);
8326 buflist
= &dev
->l2ad_buflist
;
8327 ASSERT3P(buflist
, !=, NULL
);
8328 DTRACE_PROBE2(l2arc__iodone
, zio_t
*, zio
,
8329 l2arc_write_callback_t
*, cb
);
8332 * All writes completed, or an error was hit.
8335 mutex_enter(&dev
->l2ad_mtx
);
8336 for (hdr
= list_prev(buflist
, head
); hdr
; hdr
= hdr_prev
) {
8337 hdr_prev
= list_prev(buflist
, hdr
);
8339 hash_lock
= HDR_LOCK(hdr
);
8342 * We cannot use mutex_enter or else we can deadlock
8343 * with l2arc_write_buffers (due to swapping the order
8344 * the hash lock and l2ad_mtx are taken).
8346 if (!mutex_tryenter(hash_lock
)) {
8348 * Missed the hash lock. We must retry so we
8349 * don't leave the ARC_FLAG_L2_WRITING bit set.
8351 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry
);
8354 * We don't want to rescan the headers we've
8355 * already marked as having been written out, so
8356 * we reinsert the head node so we can pick up
8357 * where we left off.
8359 list_remove(buflist
, head
);
8360 list_insert_after(buflist
, hdr
, head
);
8362 mutex_exit(&dev
->l2ad_mtx
);
8365 * We wait for the hash lock to become available
8366 * to try and prevent busy waiting, and increase
8367 * the chance we'll be able to acquire the lock
8368 * the next time around.
8370 mutex_enter(hash_lock
);
8371 mutex_exit(hash_lock
);
8376 * We could not have been moved into the arc_l2c_only
8377 * state while in-flight due to our ARC_FLAG_L2_WRITING
8378 * bit being set. Let's just ensure that's being enforced.
8380 ASSERT(HDR_HAS_L1HDR(hdr
));
8383 * Skipped - drop L2ARC entry and mark the header as no
8384 * longer L2 eligibile.
8386 if (zio
->io_error
!= 0) {
8388 * Error - drop L2ARC entry.
8390 list_remove(buflist
, hdr
);
8391 arc_hdr_clear_flags(hdr
, ARC_FLAG_HAS_L2HDR
);
8393 uint64_t psize
= HDR_GET_PSIZE(hdr
);
8394 l2arc_hdr_arcstats_decrement(hdr
);
8397 vdev_psize_to_asize(dev
->l2ad_vdev
, psize
);
8398 (void) zfs_refcount_remove_many(&dev
->l2ad_alloc
,
8399 arc_hdr_size(hdr
), hdr
);
8403 * Allow ARC to begin reads and ghost list evictions to
8406 arc_hdr_clear_flags(hdr
, ARC_FLAG_L2_WRITING
);
8408 mutex_exit(hash_lock
);
8412 * Free the allocated abd buffers for writing the log blocks.
8413 * If the zio failed reclaim the allocated space and remove the
8414 * pointers to these log blocks from the log block pointer list
8415 * of the L2ARC device.
8417 while ((abd_buf
= list_remove_tail(&cb
->l2wcb_abd_list
)) != NULL
) {
8418 abd_free(abd_buf
->abd
);
8419 zio_buf_free(abd_buf
, sizeof (*abd_buf
));
8420 if (zio
->io_error
!= 0) {
8421 lb_ptr_buf
= list_remove_head(&dev
->l2ad_lbptr_list
);
8423 * L2BLK_GET_PSIZE returns aligned size for log
8427 L2BLK_GET_PSIZE((lb_ptr_buf
->lb_ptr
)->lbp_prop
);
8428 bytes_dropped
+= asize
;
8429 ARCSTAT_INCR(arcstat_l2_log_blk_asize
, -asize
);
8430 ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count
);
8431 zfs_refcount_remove_many(&dev
->l2ad_lb_asize
, asize
,
8433 zfs_refcount_remove(&dev
->l2ad_lb_count
, lb_ptr_buf
);
8434 kmem_free(lb_ptr_buf
->lb_ptr
,
8435 sizeof (l2arc_log_blkptr_t
));
8436 kmem_free(lb_ptr_buf
, sizeof (l2arc_lb_ptr_buf_t
));
8439 list_destroy(&cb
->l2wcb_abd_list
);
8441 if (zio
->io_error
!= 0) {
8442 ARCSTAT_BUMP(arcstat_l2_writes_error
);
8445 * Restore the lbps array in the header to its previous state.
8446 * If the list of log block pointers is empty, zero out the
8447 * log block pointers in the device header.
8449 lb_ptr_buf
= list_head(&dev
->l2ad_lbptr_list
);
8450 for (int i
= 0; i
< 2; i
++) {
8451 if (lb_ptr_buf
== NULL
) {
8453 * If the list is empty zero out the device
8454 * header. Otherwise zero out the second log
8455 * block pointer in the header.
8459 dev
->l2ad_dev_hdr_asize
);
8461 memset(&l2dhdr
->dh_start_lbps
[i
], 0,
8462 sizeof (l2arc_log_blkptr_t
));
8466 memcpy(&l2dhdr
->dh_start_lbps
[i
], lb_ptr_buf
->lb_ptr
,
8467 sizeof (l2arc_log_blkptr_t
));
8468 lb_ptr_buf
= list_next(&dev
->l2ad_lbptr_list
,
8473 ARCSTAT_BUMP(arcstat_l2_writes_done
);
8474 list_remove(buflist
, head
);
8475 ASSERT(!HDR_HAS_L1HDR(head
));
8476 kmem_cache_free(hdr_l2only_cache
, head
);
8477 mutex_exit(&dev
->l2ad_mtx
);
8479 ASSERT(dev
->l2ad_vdev
!= NULL
);
8480 vdev_space_update(dev
->l2ad_vdev
, -bytes_dropped
, 0, 0);
8482 l2arc_do_free_on_write();
8484 kmem_free(cb
, sizeof (l2arc_write_callback_t
));
8488 l2arc_untransform(zio_t
*zio
, l2arc_read_callback_t
*cb
)
8491 spa_t
*spa
= zio
->io_spa
;
8492 arc_buf_hdr_t
*hdr
= cb
->l2rcb_hdr
;
8493 blkptr_t
*bp
= zio
->io_bp
;
8494 uint8_t salt
[ZIO_DATA_SALT_LEN
];
8495 uint8_t iv
[ZIO_DATA_IV_LEN
];
8496 uint8_t mac
[ZIO_DATA_MAC_LEN
];
8497 boolean_t no_crypt
= B_FALSE
;
8500 * ZIL data is never be written to the L2ARC, so we don't need
8501 * special handling for its unique MAC storage.
8503 ASSERT3U(BP_GET_TYPE(bp
), !=, DMU_OT_INTENT_LOG
);
8504 ASSERT(MUTEX_HELD(HDR_LOCK(hdr
)));
8505 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
8508 * If the data was encrypted, decrypt it now. Note that
8509 * we must check the bp here and not the hdr, since the
8510 * hdr does not have its encryption parameters updated
8511 * until arc_read_done().
8513 if (BP_IS_ENCRYPTED(bp
)) {
8514 abd_t
*eabd
= arc_get_data_abd(hdr
, arc_hdr_size(hdr
), hdr
,
8515 ARC_HDR_USE_RESERVE
);
8517 zio_crypt_decode_params_bp(bp
, salt
, iv
);
8518 zio_crypt_decode_mac_bp(bp
, mac
);
8520 ret
= spa_do_crypt_abd(B_FALSE
, spa
, &cb
->l2rcb_zb
,
8521 BP_GET_TYPE(bp
), BP_GET_DEDUP(bp
), BP_SHOULD_BYTESWAP(bp
),
8522 salt
, iv
, mac
, HDR_GET_PSIZE(hdr
), eabd
,
8523 hdr
->b_l1hdr
.b_pabd
, &no_crypt
);
8525 arc_free_data_abd(hdr
, eabd
, arc_hdr_size(hdr
), hdr
);
8530 * If we actually performed decryption, replace b_pabd
8531 * with the decrypted data. Otherwise we can just throw
8532 * our decryption buffer away.
8535 arc_free_data_abd(hdr
, hdr
->b_l1hdr
.b_pabd
,
8536 arc_hdr_size(hdr
), hdr
);
8537 hdr
->b_l1hdr
.b_pabd
= eabd
;
8540 arc_free_data_abd(hdr
, eabd
, arc_hdr_size(hdr
), hdr
);
8545 * If the L2ARC block was compressed, but ARC compression
8546 * is disabled we decompress the data into a new buffer and
8547 * replace the existing data.
8549 if (HDR_GET_COMPRESS(hdr
) != ZIO_COMPRESS_OFF
&&
8550 !HDR_COMPRESSION_ENABLED(hdr
)) {
8551 abd_t
*cabd
= arc_get_data_abd(hdr
, arc_hdr_size(hdr
), hdr
,
8552 ARC_HDR_USE_RESERVE
);
8553 void *tmp
= abd_borrow_buf(cabd
, arc_hdr_size(hdr
));
8555 ret
= zio_decompress_data(HDR_GET_COMPRESS(hdr
),
8556 hdr
->b_l1hdr
.b_pabd
, tmp
, HDR_GET_PSIZE(hdr
),
8557 HDR_GET_LSIZE(hdr
), &hdr
->b_complevel
);
8559 abd_return_buf_copy(cabd
, tmp
, arc_hdr_size(hdr
));
8560 arc_free_data_abd(hdr
, cabd
, arc_hdr_size(hdr
), hdr
);
8564 abd_return_buf_copy(cabd
, tmp
, arc_hdr_size(hdr
));
8565 arc_free_data_abd(hdr
, hdr
->b_l1hdr
.b_pabd
,
8566 arc_hdr_size(hdr
), hdr
);
8567 hdr
->b_l1hdr
.b_pabd
= cabd
;
8569 zio
->io_size
= HDR_GET_LSIZE(hdr
);
8580 * A read to a cache device completed. Validate buffer contents before
8581 * handing over to the regular ARC routines.
8584 l2arc_read_done(zio_t
*zio
)
8587 l2arc_read_callback_t
*cb
= zio
->io_private
;
8589 kmutex_t
*hash_lock
;
8590 boolean_t valid_cksum
;
8591 boolean_t using_rdata
= (BP_IS_ENCRYPTED(&cb
->l2rcb_bp
) &&
8592 (cb
->l2rcb_flags
& ZIO_FLAG_RAW_ENCRYPT
));
8594 ASSERT3P(zio
->io_vd
, !=, NULL
);
8595 ASSERT(zio
->io_flags
& ZIO_FLAG_DONT_PROPAGATE
);
8597 spa_config_exit(zio
->io_spa
, SCL_L2ARC
, zio
->io_vd
);
8599 ASSERT3P(cb
, !=, NULL
);
8600 hdr
= cb
->l2rcb_hdr
;
8601 ASSERT3P(hdr
, !=, NULL
);
8603 hash_lock
= HDR_LOCK(hdr
);
8604 mutex_enter(hash_lock
);
8605 ASSERT3P(hash_lock
, ==, HDR_LOCK(hdr
));
8608 * If the data was read into a temporary buffer,
8609 * move it and free the buffer.
8611 if (cb
->l2rcb_abd
!= NULL
) {
8612 ASSERT3U(arc_hdr_size(hdr
), <, zio
->io_size
);
8613 if (zio
->io_error
== 0) {
8615 abd_copy(hdr
->b_crypt_hdr
.b_rabd
,
8616 cb
->l2rcb_abd
, arc_hdr_size(hdr
));
8618 abd_copy(hdr
->b_l1hdr
.b_pabd
,
8619 cb
->l2rcb_abd
, arc_hdr_size(hdr
));
8624 * The following must be done regardless of whether
8625 * there was an error:
8626 * - free the temporary buffer
8627 * - point zio to the real ARC buffer
8628 * - set zio size accordingly
8629 * These are required because zio is either re-used for
8630 * an I/O of the block in the case of the error
8631 * or the zio is passed to arc_read_done() and it
8634 abd_free(cb
->l2rcb_abd
);
8635 zio
->io_size
= zio
->io_orig_size
= arc_hdr_size(hdr
);
8638 ASSERT(HDR_HAS_RABD(hdr
));
8639 zio
->io_abd
= zio
->io_orig_abd
=
8640 hdr
->b_crypt_hdr
.b_rabd
;
8642 ASSERT3P(hdr
->b_l1hdr
.b_pabd
, !=, NULL
);
8643 zio
->io_abd
= zio
->io_orig_abd
= hdr
->b_l1hdr
.b_pabd
;
8647 ASSERT3P(zio
->io_abd
, !=, NULL
);
8650 * Check this survived the L2ARC journey.
8652 ASSERT(zio
->io_abd
== hdr
->b_l1hdr
.b_pabd
||
8653 (HDR_HAS_RABD(hdr
) && zio
->io_abd
== hdr
->b_crypt_hdr
.b_rabd
));
8654 zio
->io_bp_copy
= cb
->l2rcb_bp
; /* XXX fix in L2ARC 2.0 */
8655 zio
->io_bp
= &zio
->io_bp_copy
; /* XXX fix in L2ARC 2.0 */
8656 zio
->io_prop
.zp_complevel
= hdr
->b_complevel
;
8658 valid_cksum
= arc_cksum_is_equal(hdr
, zio
);
8661 * b_rabd will always match the data as it exists on disk if it is
8662 * being used. Therefore if we are reading into b_rabd we do not
8663 * attempt to untransform the data.
8665 if (valid_cksum
&& !using_rdata
)
8666 tfm_error
= l2arc_untransform(zio
, cb
);
8668 if (valid_cksum
&& tfm_error
== 0 && zio
->io_error
== 0 &&
8669 !HDR_L2_EVICTED(hdr
)) {
8670 mutex_exit(hash_lock
);
8671 zio
->io_private
= hdr
;
8675 * Buffer didn't survive caching. Increment stats and
8676 * reissue to the original storage device.
8678 if (zio
->io_error
!= 0) {
8679 ARCSTAT_BUMP(arcstat_l2_io_error
);
8681 zio
->io_error
= SET_ERROR(EIO
);
8683 if (!valid_cksum
|| tfm_error
!= 0)
8684 ARCSTAT_BUMP(arcstat_l2_cksum_bad
);
8687 * If there's no waiter, issue an async i/o to the primary
8688 * storage now. If there *is* a waiter, the caller must
8689 * issue the i/o in a context where it's OK to block.
8691 if (zio
->io_waiter
== NULL
) {
8692 zio_t
*pio
= zio_unique_parent(zio
);
8693 void *abd
= (using_rdata
) ?
8694 hdr
->b_crypt_hdr
.b_rabd
: hdr
->b_l1hdr
.b_pabd
;
8696 ASSERT(!pio
|| pio
->io_child_type
== ZIO_CHILD_LOGICAL
);
8698 zio
= zio_read(pio
, zio
->io_spa
, zio
->io_bp
,
8699 abd
, zio
->io_size
, arc_read_done
,
8700 hdr
, zio
->io_priority
, cb
->l2rcb_flags
,
8704 * Original ZIO will be freed, so we need to update
8705 * ARC header with the new ZIO pointer to be used
8706 * by zio_change_priority() in arc_read().
8708 for (struct arc_callback
*acb
= hdr
->b_l1hdr
.b_acb
;
8709 acb
!= NULL
; acb
= acb
->acb_next
)
8710 acb
->acb_zio_head
= zio
;
8712 mutex_exit(hash_lock
);
8715 mutex_exit(hash_lock
);
8719 kmem_free(cb
, sizeof (l2arc_read_callback_t
));
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock number.
 */
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
	multilist_t *ml = NULL;
	unsigned int idx;

	ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);

	switch (list_num) {
	case 0:
		ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		break;
	case 1:
		ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		break;
	case 2:
		ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		break;
	case 3:
		ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
		break;
	default:
		return (NULL);
	}

	/*
	 * Return a randomly-selected sublist. This is acceptable
	 * because the caller feeds only a little bit of data for each
	 * call (8MB). Subsequent calls will result in different
	 * sublists being selected.
	 */
	idx = multilist_get_random_index(ml);
	return (multilist_sublist_lock(ml, idx));
}
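
/*
 * Illustrative mapping of the list_num argument above to the ARC lists it
 * selects (a restatement for readers, not additional logic):
 *
 *	0 -> arc_mfu / ARC_BUFC_METADATA	2 -> arc_mfu / ARC_BUFC_DATA
 *	1 -> arc_mru / ARC_BUFC_METADATA	3 -> arc_mru / ARC_BUFC_DATA
 *
 * l2arc_write_buffers() passes its loop index straight through, which is
 * what gives metadata priority over data and MFU priority over MRU.
 */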

/*
 * Calculates the maximum overhead of L2ARC metadata log blocks for a given
 * L2ARC write size. l2arc_evict and l2arc_write_size need to include this
 * overhead in processing to make sure there is enough headroom available
 * when writing buffers.
 */
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
	if (dev->l2ad_log_entries == 0) {
		return (0);
	} else {
		uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;

		uint64_t log_blocks = (log_entries +
		    dev->l2ad_log_entries - 1) /
		    dev->l2ad_log_entries;

		return (vdev_psize_to_asize(dev->l2ad_vdev,
		    sizeof (l2arc_log_blk_phys_t)) * log_blocks);
	}
}
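
/*
 * Illustrative arithmetic for the overhead calculation above (example
 * numbers only, assuming 1022 log entries per block and
 * SPA_MINBLOCKSHIFT == 9):
 *
 *	write_sz    = 8 MiB
 *	log_entries = 8 MiB >> 9         = 16384 potential entries
 *	log_blocks  = ceil(16384 / 1022) = 17 log blocks
 *	overhead    = 17 * asize(sizeof (l2arc_log_blk_phys_t))
 *
 * i.e. the caller must reserve room for roughly 17 metadata log blocks on
 * top of the 8 MiB of buffer data it intends to write.
 */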

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes. This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;
	l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
	vdev_t *vd = dev->l2ad_vdev;
	boolean_t rerun;

	buflist = &dev->l2ad_buflist;

top:
	rerun = B_FALSE;
	if (dev->l2ad_hand + distance > dev->l2ad_end) {
		/*
		 * When there is no space to accommodate upcoming writes,
		 * evict to the end. Then bump the write and evict hands
		 * to the start and iterate. This iteration does not
		 * happen indefinitely as we make sure in
		 * l2arc_write_size() that when the write hand is reset,
		 * the write size does not exceed the end of the device.
		 */
		rerun = B_TRUE;
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

	if (!all) {
		/*
		 * This check has to be placed after deciding whether to
		 * iterate (rerun).
		 */
		if (dev->l2ad_first) {
			/*
			 * This is the first sweep through the device. There is
			 * nothing to evict. We have already trimmed the
			 * whole device.
			 */
			goto out;
		} else {
			/*
			 * Trim the space to be evicted.
			 */
			if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
			    l2arc_trim_ahead > 0) {
				/*
				 * We have to drop the spa_config lock because
				 * vdev_trim_range() will acquire it.
				 * l2ad_evict already accounts for the label
				 * size. To prevent vdev_trim_ranges() from
				 * adding it again, we subtract it from
				 * l2ad_evict.
				 */
				spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
				vdev_trim_simple(vd,
				    dev->l2ad_evict - VDEV_LABEL_START_SIZE,
				    taddr - dev->l2ad_evict);
				spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
				    RW_READER);
			}

			/*
			 * When rebuilding L2ARC we retrieve the evict hand
			 * from the header of the device. Of note, l2arc_evict()
			 * does not actually delete buffers from the cache
			 * device, but trimming may do so depending on the
			 * hardware implementation. Thus keeping track of the
			 * evict hand is useful.
			 */
			dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
		}
	}

retry:
	mutex_enter(&dev->l2ad_mtx);
	/*
	 * We have to account for evicted log blocks. Run vdev_space_update()
	 * on log blocks whose offset (in bytes) is before the evicted offset
	 * (in bytes) by searching in the list of pointers to log blocks
	 * present in the L2ARC device.
	 */
	for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
	    lb_ptr_buf = lb_ptr_buf_prev) {

		lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);

		/* L2BLK_GET_PSIZE returns aligned size for log blocks */
		uint64_t asize = L2BLK_GET_PSIZE(
		    (lb_ptr_buf->lb_ptr)->lbp_prop);

		/*
		 * We don't worry about log blocks left behind (ie
		 * lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
		 * will never write more than l2arc_evict() evicts.
		 */
		if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
			break;
		} else {
			vdev_space_update(vd, -asize, 0, 0);
			ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
			ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
			zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
			    lb_ptr_buf);
			zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
			list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
			kmem_free(lb_ptr_buf->lb_ptr,
			    sizeof (l2arc_log_blkptr_t));
			kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
		}
	}

	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		ASSERT(!HDR_EMPTY(hdr));
		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto retry;
		}

		/*
		 * A header can't be on this list if it doesn't have L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);

out:
	/*
	 * We need to check if we evict all buffers, otherwise we may iterate
	 * unnecessarily.
	 */
	if (!all && rerun) {
		/*
		 * Bump device hand to the device start if it is approaching the
		 * end. l2arc_evict() has already evicted ahead for this case.
		 */
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
		goto top;
	}

	if (!all) {
		/*
		 * In case of cache device removal (all) the following
		 * assertions may be violated without functional consequences
		 * as the device is about to be removed.
		 */
		ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
		if (!dev->l2ad_first)
			ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
	}
}
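
/*
 * Illustrative sketch of the eviction geometry handled above (example
 * numbers only, not taken from the implementation): with l2ad_start = 4 MiB,
 * l2ad_end = 1 GiB, l2ad_hand = 1016 MiB and distance = 16 MiB, the sum
 * l2ad_hand + distance overshoots l2ad_end, so the code evicts up to
 * taddr = l2ad_end, then wraps both hands back to l2ad_start and reruns
 * with the same distance. On a first sweep (l2ad_first == B_TRUE) nothing
 * is evicted because the region ahead of the hand has never been written.
 */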

/*
 * Handle any abd transforms that might be required for writing to the L2ARC.
 * If successful, this function will always return an abd with the data
 * transformed as it is on disk in a new abd of asize bytes.
 */
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
    abd_t **abd_out)
{
	int ret;
	void *tmp = NULL;
	abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
	enum zio_compress compress = HDR_GET_COMPRESS(hdr);
	uint64_t psize = HDR_GET_PSIZE(hdr);
	uint64_t size = arc_hdr_size(hdr);
	boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
	boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
	dsl_crypto_key_t *dck = NULL;
	uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
	boolean_t no_crypt = B_FALSE;

	ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
	    !HDR_COMPRESSION_ENABLED(hdr)) ||
	    HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
	ASSERT3U(psize, <=, asize);

	/*
	 * If this data simply needs its own buffer, we simply allocate it
	 * and copy the data. This may be done to eliminate a dependency on a
	 * shared buffer or to reallocate the buffer to match asize.
	 */
	if (HDR_HAS_RABD(hdr) && asize != psize) {
		ASSERT3U(asize, >=, psize);
		to_write = abd_alloc_for_io(asize, ismd);
		abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
		if (psize != asize)
			abd_zero_off(to_write, psize, asize - psize);
		goto out;
	}

	if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
	    !HDR_ENCRYPTED(hdr)) {
		ASSERT3U(size, ==, psize);
		to_write = abd_alloc_for_io(asize, ismd);
		abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
		if (size != asize)
			abd_zero_off(to_write, size, asize - size);
		goto out;
	}

	if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
		/*
		 * In some cases, we can wind up with size > asize, so
		 * we need to opt for the larger allocation option here.
		 *
		 * (We also need abd_return_buf_copy in all cases because
		 * it's an ASSERT() to modify the buffer before returning it
		 * with arc_return_buf(), and all the compressors
		 * write things before deciding to fail compression in nearly
		 * every case.)
		 */
		uint64_t bufsize = MAX(size, asize);
		cabd = abd_alloc_for_io(bufsize, ismd);
		tmp = abd_borrow_buf(cabd, bufsize);

		psize = zio_compress_data(compress, to_write, &tmp, size,
		    hdr->b_complevel);

		if (psize >= asize) {
			psize = HDR_GET_PSIZE(hdr);
			abd_return_buf_copy(cabd, tmp, bufsize);
			HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
			to_write = cabd;
			abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize);
			if (psize != asize)
				abd_zero_off(to_write, psize, asize - psize);
			goto encrypt;
		}
		ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
		if (psize < asize)
			memset((char *)tmp + psize, 0, bufsize - psize);
		psize = HDR_GET_PSIZE(hdr);
		abd_return_buf_copy(cabd, tmp, bufsize);
		to_write = cabd;
	}

encrypt:
	if (HDR_ENCRYPTED(hdr)) {
		eabd = abd_alloc_for_io(asize, ismd);

		/*
		 * If the dataset was disowned before the buffer
		 * made it to this point, the key to re-encrypt
		 * it won't be available. In this case we simply
		 * won't write the buffer to the L2ARC.
		 */
		ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
		    FTAG, &dck);
		if (ret != 0)
			goto error;

		ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
		    hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
		    &no_crypt);
		if (ret != 0)
			goto error;

		if (no_crypt)
			abd_copy(eabd, to_write, psize);

		if (psize != asize)
			abd_zero_off(eabd, psize, asize - psize);

		/* assert that the MAC we got here matches the one we saved */
		ASSERT0(memcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
		spa_keystore_dsl_key_rele(spa, dck, FTAG);

		if (to_write == cabd)
			abd_free(cabd);

		to_write = eabd;
	}

out:
	ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
	*abd_out = to_write;
	return (0);

error:
	if (dck != NULL)
		spa_keystore_dsl_key_rele(spa, dck, FTAG);
	if (cabd != NULL)
		abd_free(cabd);
	if (eabd != NULL)
		abd_free(eabd);

	*abd_out = NULL;
	return (ret);
}
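
/*
 * Sketch of the transform pipeline implemented above (illustrative only):
 * the hdr's data is staged into a private abd, then optionally compressed,
 * then optionally re-encrypted, and finally zero-padded out to asize so the
 * on-disk L2ARC copy matches the block as it would exist in the main pool:
 *
 *	b_pabd --copy--> to_write --compress--> cabd --encrypt--> eabd
 *	                     (pad tail to asize at each stage)
 */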

static void
l2arc_blk_fetch_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;

	cb = zio->io_private;
	if (cb->l2rcb_abd != NULL)
		abd_free(cb->l2rcb_abd);
	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment and the
 * writing of log blocks).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_psize, write_lsize, headroom;
	boolean_t full;
	l2arc_write_callback_t *cb = NULL;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;

	ASSERT3P(dev->l2ad_vdev, !=, NULL);

	pio = NULL;
	write_lsize = write_asize = write_psize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);

	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
		/*
		 * If pass == 1 or 3, we cache MRU metadata and data
		 * respectively.
		 */
		if (l2arc_mfuonly) {
			if (pass == 1 || pass == 3)
				continue;
		}

		multilist_sublist_t *mls = l2arc_sublist_lock(pass);
		uint64_t passed_sz = 0;

		VERIFY3P(mls, !=, NULL);

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);

		headroom = target_sz * l2arc_headroom;
		if (zfs_compressed_arc_enabled)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;
			abd_t *to_write = NULL;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += HDR_GET_LSIZE(hdr);
			if (l2arc_headroom != 0 && passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			ASSERT(HDR_HAS_L1HDR(hdr));

			ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
			ASSERT3U(arc_hdr_size(hdr), >, 0);
			ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
			    HDR_HAS_RABD(hdr));
			uint64_t psize = HDR_GET_PSIZE(hdr);
			uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
			    psize);

			/*
			 * If the allocated size of this buffer plus the max
			 * size for the pending log block exceeds the evicted
			 * target size, terminate writing buffers for this run.
			 */
			if (write_asize + asize +
			    sizeof (l2arc_log_blk_phys_t) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			/*
			 * We rely on the L1 portion of the header below, so
			 * it's invalid for this header to have been evicted out
			 * of the ghost cache, prior to being written out. The
			 * ARC_FLAG_L2_WRITING bit ensures this won't happen.
			 */
			arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);

			/*
			 * If this header has b_rabd, we can use this since it
			 * must always match the data exactly as it exists on
			 * disk. Otherwise, the L2ARC can normally use the
			 * hdr's data, but if we're sharing data between the
			 * hdr and one of its bufs, L2ARC needs its own copy of
			 * the data so that the ZIO below can't race with the
			 * buf consumer. To ensure that this copy will be
			 * available for the lifetime of the ZIO and be cleaned
			 * up afterwards, we add it to the l2arc_free_on_write
			 * queue. If we need to apply any transforms to the
			 * data (compression, encryption) we will also need the
			 * extra buffer.
			 */
			if (HDR_HAS_RABD(hdr) && psize == asize) {
				to_write = hdr->b_crypt_hdr.b_rabd;
			} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
			    HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
			    !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
			    psize == asize) {
				to_write = hdr->b_l1hdr.b_pabd;
			} else {
				int ret;
				arc_buf_contents_t type = arc_buf_type(hdr);

				ret = l2arc_apply_transforms(spa, hdr, asize,
				    &to_write);
				if (ret != 0) {
					arc_hdr_clear_flags(hdr,
					    ARC_FLAG_L2_WRITING);
					mutex_exit(hash_lock);
					continue;
				}

				l2arc_free_abd_on_write(to_write, asize, type);
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				/*
				 * Create a list to save allocated abd buffers
				 * for l2arc_log_blk_commit().
				 */
				list_create(&cb->l2wcb_abd_list,
				    sizeof (l2arc_lb_abd_buf_t),
				    offsetof(l2arc_lb_abd_buf_t, node));
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			hdr->b_l2hdr.b_dev = dev;
			hdr->b_l2hdr.b_hits = 0;

			hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
			hdr->b_l2hdr.b_arcs_state =
			    hdr->b_l1hdr.b_state->arcs_state;
			arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);

			(void) zfs_refcount_add_many(&dev->l2ad_alloc,
			    arc_hdr_size(hdr), hdr);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    hdr->b_l2hdr.b_daddr, asize, to_write,
			    ZIO_CHECKSUM_OFF, NULL, hdr,
			    ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			write_lsize += HDR_GET_LSIZE(hdr);
			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);

			write_psize += psize;
			write_asize += asize;
			dev->l2ad_hand += asize;
			l2arc_hdr_arcstats_increment(hdr);
			vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

			mutex_exit(hash_lock);

			/*
			 * Append buf info to current log and commit if full.
			 * arcstat_l2_{size,asize} kstats are updated
			 * internally.
			 */
			if (l2arc_log_blk_insert(dev, hdr)) {
				/*
				 * l2ad_hand will be adjusted in
				 * l2arc_log_blk_commit().
				 */
				write_asize +=
				    l2arc_log_blk_commit(dev, pio, cb);
			}

			(void) zio_nowait(wzio);
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_lsize);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);

		/*
		 * Although we did not write any buffers l2ad_evict may
		 * have changed.
		 */
		if (dev->l2ad_evict != l2dhdr->dh_evict)
			l2arc_dev_hdr_update(dev);

		return (0);
	}

	if (!dev->l2ad_first)
		ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	/*
	 * Update the device header after the zio completes as
	 * l2arc_write_done() may have updated the memory holding the log block
	 * pointers in the device header.
	 */
	l2arc_dev_hdr_update(dev);

	return (write_asize);
}
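
/*
 * Illustrative numbers for the headroom scan limit used above (example
 * values, not defaults guaranteed by this file): with target_sz = 8 MiB,
 * l2arc_headroom = 2 and l2arc_headroom_boost = 200, each pass may scan up
 * to headroom = 8 MiB * 2 = 16 MiB of buffers, boosted to 32 MiB when
 * compressed ARC is enabled, before giving up on that list.
 */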

static boolean_t
l2arc_hdr_limit_reached(void)
{
	int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);

	return (arc_reclaim_needed() ||
	    (s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
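
/*
 * Worked example for the limit check above (illustrative values only):
 * with arc_c_max = 16 GiB, l2arc_meta_percent = 33 and a cold ARC
 * (arc_warm == B_FALSE), L2ARC header metadata may consume up to
 * 16 GiB * 33 / 100, roughly 5.3 GiB of ARC space, before the feed thread
 * and the rebuild code refuse to add more L2-only headers.
 */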

/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static __attribute__((noreturn)) void
l2arc_feed_thread(void *unused)
{
	(void) unused;
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size, wrote;
	clock_t begin, next = ddi_get_lbolt();
	fstrans_cookie_t cookie;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	cookie = spl_fstrans_mark();
	while (l2arc_thread_exit == 0) {
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
		    &l2arc_feed_thr_lock, next);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
		next = ddi_get_lbolt() + hz;

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);
		begin = ddi_get_lbolt();

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.   This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT3P(spa, !=, NULL);

		/*
		 * If the pool is read-only then force the feed thread to
		 * sleep a little longer.
		 */
		if (!spa_writeable(spa)) {
			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (l2arc_hdr_limit_reached()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = l2arc_write_size(dev);

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		wrote = l2arc_write_buffers(spa, dev, size);

		/*
		 * Calculate interval between writes.
		 */
		next = l2arc_write_interval(begin, size, wrote);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}
	spl_fstrans_unmark(cookie);

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	return (l2arc_vdev_get(vd) != NULL);
}

/*
 * Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
 * the vdev_t isn't an L2ARC device.
 */
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev);
}

static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
	uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
	spa_t *spa = dev->l2ad_spa;

	/*
	 * The L2ARC has to hold at least the payload of one log block for
	 * them to be restored (persistent L2ARC). The payload of a log block
	 * depends on the amount of its log entries. We always write log blocks
	 * with 1022 entries. How many of them are committed or restored depends
	 * on the size of the L2ARC device. Thus the maximum payload of
	 * one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
	 * is less than that, we reduce the amount of committed and restored
	 * log entries per block so as to enable persistence.
	 */
	if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
		dev->l2ad_log_entries = 0;
	} else {
		dev->l2ad_log_entries = MIN((dev->l2ad_end -
		    dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
		    L2ARC_LOG_BLK_MAX_ENTRIES);
	}

	/*
	 * Read the device header, if an error is returned do not rebuild L2ARC.
	 */
	if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
		/*
		 * If we are onlining a cache device (vdev_reopen) that was
		 * still present (l2arc_vdev_present()) and rebuild is enabled,
		 * we should evict all ARC buffers and pointers to log blocks
		 * and reclaim their space before restoring its contents to
		 * L2ARC.
		 */
		if (reopen) {
			if (!l2arc_rebuild_enabled) {
				return;
			} else {
				l2arc_evict(dev, 0, B_TRUE);
				/* start a new log block */
				dev->l2ad_log_ent_idx = 0;
				dev->l2ad_log_blk_payload_asize = 0;
				dev->l2ad_log_blk_payload_start = 0;
			}
		}
		/*
		 * Just mark the device as pending for a rebuild. We won't
		 * be starting a rebuild in line here as it would block pool
		 * import. Instead spa_load_impl will hand that off to an
		 * async task which will call l2arc_spa_rebuild_start.
		 */
		dev->l2ad_rebuild = B_TRUE;
	} else if (spa_writeable(spa)) {
		/*
		 * In this case TRIM the whole device if l2arc_trim_ahead > 0,
		 * otherwise create a new header. We zero out the memory holding
		 * the header to reset dh_start_lbps. If we TRIM the whole
		 * device the new header will be written by
		 * vdev_trim_l2arc_thread() at the end of the TRIM to update the
		 * trim_state in the header too. When reading the header, if
		 * trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
		 * we opt to TRIM the whole device again.
		 */
		if (l2arc_trim_ahead > 0) {
			dev->l2ad_trim_all = B_TRUE;
		} else {
			memset(l2dhdr, 0, l2dhdr_asize);
			l2arc_dev_hdr_update(dev);
		}
	}
}
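
/*
 * Worked example for the log-entry sizing above (illustrative only): with
 * SPA_MAXBLOCKSHIFT == 24 (16 MiB), a 4 GiB usable region yields
 * MIN(4 GiB >> 24, L2ARC_LOG_BLK_MAX_ENTRIES) = MIN(256, 1022) = 256
 * entries per log block, while a region of roughly 16 GiB or more uses the
 * full 1022. Devices smaller than l2arc_rebuild_blocks_min_l2size get 0
 * entries, which disables persistence for that device.
 */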

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
	l2arc_dev_t *adddev;
	uint64_t l2dhdr_asize;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	/* leave extra size for an l2arc device header */
	l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
	    MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
	adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
	ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	adddev->l2ad_writing = B_FALSE;
	adddev->l2ad_trim_all = B_FALSE;
	list_link_init(&adddev->l2ad_node);
	adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);

	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));

	/*
	 * This is a list of pointers to log blocks that are still present
	 * on the device.
	 */
	list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
	    offsetof(l2arc_lb_ptr_buf_t, node));

	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
	zfs_refcount_create(&adddev->l2ad_alloc);
	zfs_refcount_create(&adddev->l2ad_lb_asize);
	zfs_refcount_create(&adddev->l2ad_lb_count);

	/*
	 * Decide if dev is eligible for L2ARC rebuild or whole device
	 * trimming. This has to happen before the device is added in the
	 * cache device list and l2arc_dev_mtx is released. Otherwise
	 * l2arc_feed_thread() might already start writing on the
	 * device.
	 */
	l2arc_rebuild_dev(adddev, B_FALSE);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
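
/*
 * Illustrative on-disk layout implied by the address math above (example
 * numbers, assuming a 4 MiB front label/boot area and a device header
 * rounded up to one ashift-sized block):
 *
 *	0 .. VDEV_LABEL_START_SIZE-1	vdev labels / boot block
 *	+ l2dhdr_asize			persistent L2ARC device header
 *	l2ad_start .. l2ad_end-1	circular buffer of L2ARC data
 *
 * A freshly added cache device therefore begins with l2ad_hand ==
 * l2ad_evict == l2ad_start and fills toward l2ad_end before wrapping.
 */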

/*
 * Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
 * in case of onlining a cache device.
 */
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
	l2arc_dev_t *dev = NULL;

	dev = l2arc_vdev_get(vd);
	ASSERT3P(dev, !=, NULL);

	/*
	 * In contrast to l2arc_add_vdev() we do not have to worry about
	 * l2arc_feed_thread() invalidating previous content when onlining a
	 * cache device. The device parameters (l2ad*) are not cleared when
	 * offlining the device and writing new buffers will not invalidate
	 * all previous content. In worst case only buffers that have not had
	 * their log block written to the device will be lost.
	 * When onlining the cache device (ie offline->online without exporting
	 * the pool in between) this happens:
	 *	vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
	 *			|			|
	 *		vdev_is_dead() = B_FALSE	l2ad_rebuild = B_TRUE
	 * During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
	 * is set to B_TRUE we might write additional buffers to the device.
	 */
	l2arc_rebuild_dev(dev, reopen);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	remdev = l2arc_vdev_get(vd);
	ASSERT3P(remdev, !=, NULL);

	/*
	 * Cancel any ongoing or scheduled rebuild.
	 */
	mutex_enter(&l2arc_rebuild_thr_lock);
	if (remdev->l2ad_rebuild_began == B_TRUE) {
		remdev->l2ad_rebuild_cancel = B_TRUE;
		while (remdev->l2ad_rebuild == B_TRUE)
			cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
	}
	mutex_exit(&l2arc_rebuild_thr_lock);

	/*
	 * Remove device from global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(&remdev->l2ad_buflist);
	ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
	list_destroy(&remdev->l2ad_lbptr_list);
	mutex_destroy(&remdev->l2ad_mtx);
	zfs_refcount_destroy(&remdev->l2ad_alloc);
	zfs_refcount_destroy(&remdev->l2ad_lb_asize);
	zfs_refcount_destroy(&remdev->l2ad_lb_count);
	kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
	vmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_rebuild_thr_lock);
	cv_destroy(&l2arc_rebuild_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode_global & SPA_MODE_WRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, defclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode_global & SPA_MODE_WRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}

/*
 * Punches out rebuild threads for the L2ARC devices in a spa. This should
 * be called after pool import from the spa async thread, since starting
 * these threads directly from spa_import() will make them part of the
 * "zpool import" context and delay process exit (and thus pool import).
 */
void
l2arc_spa_rebuild_start(spa_t *spa)
{
	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Locate the spa's l2arc devices and kick off rebuild threads.
	 */
	for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
		l2arc_dev_t *dev =
		    l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
		if (dev == NULL) {
			/* Don't attempt a rebuild if the vdev is UNAVAIL */
			continue;
		}
		mutex_enter(&l2arc_rebuild_thr_lock);
		if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
			dev->l2ad_rebuild_began = B_TRUE;
			(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
			    dev, 0, &p0, TS_RUN, minclsyspri);
		}
		mutex_exit(&l2arc_rebuild_thr_lock);
	}
}

/*
 * Main entry point for L2ARC rebuilding.
 */
static __attribute__((noreturn)) void
l2arc_dev_rebuild_thread(void *arg)
{
	l2arc_dev_t *dev = arg;

	VERIFY(!dev->l2ad_rebuild_cancel);
	VERIFY(dev->l2ad_rebuild);
	(void) l2arc_rebuild(dev);
	mutex_enter(&l2arc_rebuild_thr_lock);
	dev->l2ad_rebuild_began = B_FALSE;
	dev->l2ad_rebuild = B_FALSE;
	mutex_exit(&l2arc_rebuild_thr_lock);

	thread_exit();
}

/*
 * This function implements the actual L2ARC metadata rebuild. It:
 * starts reading the log block chain and restores each block's contents
 * to memory (reconstructing arc_buf_hdr_t's).
 *
 * Operation stops under any of the following conditions:
 *
 * 1) We reach the end of the log block chain.
 * 2) We encounter *any* error condition (cksum errors, io errors)
 */
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
	vdev_t *vd = dev->l2ad_vdev;
	spa_t *spa = vd->vdev_spa;
	int err = 0;
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
	l2arc_log_blk_phys_t *this_lb, *next_lb;
	zio_t *this_io = NULL, *next_io = NULL;
	l2arc_log_blkptr_t lbps[2];
	l2arc_lb_ptr_buf_t *lb_ptr_buf;
	boolean_t lock_held;

	this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
	next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);

	/*
	 * We prevent device removal while issuing reads to the device,
	 * then during the rebuilding phases we drop this lock again so
	 * that a spa_unload or device remove can be initiated - this is
	 * safe, because the spa will signal us to stop before removing
	 * our device and wait for us to stop.
	 */
	spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
	lock_held = B_TRUE;

	/*
	 * Retrieve the persistent L2ARC device state.
	 * L2BLK_GET_PSIZE returns aligned size for log blocks.
	 */
	dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
	dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
	    L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
	    dev->l2ad_start);
	dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);

	vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
	vd->vdev_trim_state = l2dhdr->dh_trim_state;

	/*
	 * In case the zfs module parameter l2arc_rebuild_enabled is false
	 * we do not start the rebuild process.
	 */
	if (!l2arc_rebuild_enabled)
		goto out;

	/* Prepare the rebuild process */
	memcpy(lbps, l2dhdr->dh_start_lbps, sizeof (lbps));

	/* Start the rebuild process */
	for (;;) {
		if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
			break;

		if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
		    this_lb, next_lb, this_io, &next_io)) != 0)
			goto out;

		/*
		 * Our memory pressure valve. If the system is running low
		 * on memory, rather than swamping memory with new ARC buf
		 * hdrs, we opt not to rebuild the L2ARC. At this point,
		 * however, we have already set up our L2ARC dev to chain in
		 * new metadata log blocks, so the user may choose to offline/
		 * online the L2ARC dev at a later time (or re-import the pool)
		 * to reconstruct it (when there's less memory pressure).
		 */
		if (l2arc_hdr_limit_reached()) {
			ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
			cmn_err(CE_NOTE, "System running low on memory, "
			    "aborting L2ARC rebuild.");
			err = SET_ERROR(ENOMEM);
			goto out;
		}

		spa_config_exit(spa, SCL_L2ARC, vd);
		lock_held = B_FALSE;

		/*
		 * Now that we know that the next_lb checks out alright, we
		 * can start reconstruction from this log block.
		 * L2BLK_GET_PSIZE returns aligned size for log blocks.
		 */
		uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
		l2arc_log_blk_restore(dev, this_lb, asize);

		/*
		 * log block restored, include its pointer in the list of
		 * pointers to log blocks present in the L2ARC device.
		 */
		lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
		lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
		    KM_SLEEP);
		memcpy(lb_ptr_buf->lb_ptr, &lbps[0],
		    sizeof (l2arc_log_blkptr_t));
		mutex_enter(&dev->l2ad_mtx);
		list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
		ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
		ARCSTAT_BUMP(arcstat_l2_log_blk_count);
		zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
		zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
		mutex_exit(&dev->l2ad_mtx);
		vdev_space_update(vd, asize, 0, 0);

		/*
		 * Protection against loops of log blocks:
		 *
		 *				       l2ad_hand  l2ad_evict
		 *				       V	  V
		 * l2ad_start |=======================================| l2ad_end
		 *             -----|||----|||---|||----|||
		 *                  (3)    (2)   (1)    (0)
		 *             ---|||---|||----|||---|||
		 *		  (7)   (6)    (5)   (4)
		 *
		 * In this situation the pointer of log block (4) passes
		 * l2arc_log_blkptr_valid() but the log block should not be
		 * restored as it is overwritten by the payload of log block
		 * (0). Only log blocks (0)-(3) should be restored. We check
		 * whether l2ad_evict lies in between the payload starting
		 * offset of the next log block (lbps[1].lbp_payload_start)
		 * and the payload starting offset of the present log block
		 * (lbps[0].lbp_payload_start). If true and this isn't the
		 * first pass, we are looping from the beginning and we should
		 * stop.
		 */
		if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
		    lbps[0].lbp_payload_start, dev->l2ad_evict) &&
		    !dev->l2ad_first)
			goto out;

		kpreempt(KPREEMPT_SYNC);
		for (;;) {
			mutex_enter(&l2arc_rebuild_thr_lock);
			if (dev->l2ad_rebuild_cancel) {
				dev->l2ad_rebuild = B_FALSE;
				cv_signal(&l2arc_rebuild_thr_cv);
				mutex_exit(&l2arc_rebuild_thr_lock);
				err = SET_ERROR(ECANCELED);
				goto out;
			}
			mutex_exit(&l2arc_rebuild_thr_lock);
			if (spa_config_tryenter(spa, SCL_L2ARC, vd,
			    RW_READER)) {
				lock_held = B_TRUE;
				break;
			}
			/*
			 * L2ARC config lock held by somebody in writer,
			 * possibly due to them trying to remove us. They'll
			 * likely want us to shut down, so after a little
			 * delay, we check l2ad_rebuild_cancel and retry
			 * the lock again.
			 */
			delay(1);
		}

		/*
		 * Continue with the next log block.
		 */
		lbps[0] = lbps[1];
		lbps[1] = this_lb->lb_prev_lbp;
		PTR_SWAP(this_lb, next_lb);
		this_io = next_io;
		next_io = NULL;
	}

	if (this_io != NULL)
		l2arc_log_blk_fetch_abort(this_io);
out:
	if (next_io != NULL)
		l2arc_log_blk_fetch_abort(next_io);
	vmem_free(this_lb, sizeof (*this_lb));
	vmem_free(next_lb, sizeof (*next_lb));

	if (!l2arc_rebuild_enabled) {
		spa_history_log_internal(spa, "L2ARC rebuild", NULL,
		    "disabled");
	} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
		ARCSTAT_BUMP(arcstat_l2_rebuild_success);
		spa_history_log_internal(spa, "L2ARC rebuild", NULL,
		    "successful, restored %llu blocks",
		    (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
	} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
		/*
		 * No error but also nothing restored, meaning the lbps array
		 * in the device header points to invalid/non-present log
		 * blocks. Reset the header.
		 */
		spa_history_log_internal(spa, "L2ARC rebuild", NULL,
		    "no valid log blocks");
		memset(l2dhdr, 0, dev->l2ad_dev_hdr_asize);
		l2arc_dev_hdr_update(dev);
	} else if (err == ECANCELED) {
		/*
		 * In case the rebuild was canceled do not log to spa history
		 * log as the pool may be in the process of being removed.
		 */
		zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
		    (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
	} else if (err != 0) {
		spa_history_log_internal(spa, "L2ARC rebuild", NULL,
		    "aborted, restored %llu blocks",
		    (u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
	}

	if (lock_held)
		spa_config_exit(spa, SCL_L2ARC, vd);

	return (err);
}

/*
 * Attempts to read the device header on the provided L2ARC device and writes
 * it to `hdr'. On success, this function returns 0, otherwise the appropriate
 * error code is returned.
 */
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
	int err;
	uint64_t guid;
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
	const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
	abd_t *abd;

	guid = spa_guid(dev->l2ad_vdev->vdev_spa);

	abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);

	err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
	    VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
	    ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
	    ZIO_FLAG_SPECULATIVE, B_FALSE));

	abd_free(abd);

	if (err != 0) {
		ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
		zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
		    "vdev guid: %llu", err,
		    (u_longlong_t)dev->l2ad_vdev->vdev_guid);
		return (err);
	}

	if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
		byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));

	if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
	    l2dhdr->dh_spa_guid != guid ||
	    l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
	    l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
	    l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
	    l2dhdr->dh_end != dev->l2ad_end ||
	    !l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
	    l2dhdr->dh_evict) ||
	    (l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
	    l2arc_trim_ahead > 0)) {
		/*
		 * Attempt to rebuild a device containing no actual dev hdr
		 * or containing a header from some other pool or from another
		 * version of persistent L2ARC.
		 */
		ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
		return (SET_ERROR(ENOTSUP));
	}

	return (0);
}

/*
 * Reads L2ARC log blocks from storage and validates their contents.
 *
 * This function implements a simple fetcher to make sure that while
 * we're processing one buffer the L2ARC is already fetching the next
 * one in the chain.
 *
 * The arguments this_lp and next_lp point to the current and next log block
 * address in the block chain. Similarly, this_lb and next_lb hold the
 * l2arc_log_blk_phys_t's of the current and next L2ARC blk.
 *
 * The `this_io' and `next_io' arguments are used for block fetching.
 * When issuing the first blk IO during rebuild, you should pass NULL for
 * `this_io'. This function will then issue a sync IO to read the block and
 * also issue an async IO to fetch the next block in the block chain. The
 * fetched IO is returned in `next_io'. On subsequent calls to this
 * function, pass the value returned in `next_io' from the previous call
 * as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
 * Prior to the call, you should initialize your `next_io' pointer to be
 * NULL. If no fetch IO was issued, the pointer is left set at NULL.
 *
 * On success, this function returns 0, otherwise it returns an appropriate
 * error code. On error the fetching IO is aborted and cleared before
 * returning from this function. Therefore, if we return `success', the
 * caller can assume that we have taken care of cleanup of fetch IOs.
 */
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
    const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
    l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
    zio_t *this_io, zio_t **next_io)
{
	int err = 0;
	zio_cksum_t cksum;
	abd_t *abd = NULL;
	uint64_t asize;

	ASSERT(this_lbp != NULL && next_lbp != NULL);
	ASSERT(this_lb != NULL && next_lb != NULL);
	ASSERT(next_io != NULL && *next_io == NULL);
	ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));

	/*
	 * Check to see if we have issued the IO for this log block in a
	 * previous run. If not, this is the first call, so issue it now.
	 */
	if (this_io == NULL) {
		this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
		    this_lb);
	}

	/*
	 * Peek to see if we can start issuing the next IO immediately.
	 */
	if (l2arc_log_blkptr_valid(dev, next_lbp)) {
		/*
		 * Start issuing IO for the next log block early - this
		 * should help keep the L2ARC device busy while we
		 * decompress and restore this log block.
		 */
		*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
		    next_lb);
	}

	/* Wait for the IO to read this log block to complete */
	if ((err = zio_wait(this_io)) != 0) {
		ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
		zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
		    "offset: %llu, vdev guid: %llu", err,
		    (u_longlong_t)this_lbp->lbp_daddr,
		    (u_longlong_t)dev->l2ad_vdev->vdev_guid);
		goto cleanup;
	}

	/*
	 * Make sure the buffer checks out.
	 * L2BLK_GET_PSIZE returns aligned size for log blocks.
	 */
	asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
	fletcher_4_native(this_lb, asize, NULL, &cksum);
	if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
		ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
		zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
		    "vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
		    (u_longlong_t)this_lbp->lbp_daddr,
		    (u_longlong_t)dev->l2ad_vdev->vdev_guid,
		    (u_longlong_t)dev->l2ad_hand,
		    (u_longlong_t)dev->l2ad_evict);
		err = SET_ERROR(ECKSUM);
		goto cleanup;
	}

	/* Now we can take our time decoding this buffer */
	switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
	case ZIO_COMPRESS_OFF:
		break;
	case ZIO_COMPRESS_LZ4:
		abd = abd_alloc_for_io(asize, B_TRUE);
		abd_copy_from_buf_off(abd, this_lb, 0, asize);
		if ((err = zio_decompress_data(
		    L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
		    abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
			err = SET_ERROR(EINVAL);
			goto cleanup;
		}
		break;
	default:
		err = SET_ERROR(EINVAL);
		goto cleanup;
	}
	if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
		byteswap_uint64_array(this_lb, sizeof (*this_lb));
	if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
		err = SET_ERROR(EINVAL);
		goto cleanup;
	}
cleanup:
	/* Abort an in-flight fetch I/O in case of error */
	if (err != 0 && *next_io != NULL) {
		l2arc_log_blk_fetch_abort(*next_io);
		*next_io = NULL;
	}
	if (abd != NULL)
		abd_free(abd);
	return (err);
}
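
/*
 * Illustrative calling pattern for the fetcher above (a sketch mirroring how
 * l2arc_rebuild() drives it; error handling omitted):
 *
 *	zio_t *this_io = NULL, *next_io = NULL;
 *	while (l2arc_log_blkptr_valid(dev, &lbps[0])) {
 *		err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
 *		    this_lb, next_lb, this_io, &next_io);
 *		...restore this_lb, advance lbps...
 *		this_io = next_io;	// the prefetched zio becomes current
 *		next_io = NULL;
 *	}
 */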

/*
 * Restores the payload of a log block to ARC. This creates empty ARC hdr
 * entries which only contain an l2arc hdr, essentially restoring the
 * buffers to their L2ARC evicted state. This function also updates space
 * usage on the L2ARC vdev to make sure it tracks restored buffers.
 */
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
    uint64_t lb_asize)
{
	uint64_t size = 0, asize = 0;
	uint64_t log_entries = dev->l2ad_log_entries;

	/*
	 * Usually arc_adapt() is called only for data, not headers, but
	 * since we may allocate significant amount of memory here, let ARC
	 * grow its arc_c.
	 */
	arc_adapt(log_entries * HDR_L2ONLY_SIZE);

	for (int i = log_entries - 1; i >= 0; i--) {
		/*
		 * Restore goes in the reverse temporal direction to preserve
		 * correct temporal ordering of buffers in the l2ad_buflist.
		 * l2arc_hdr_restore also does a list_insert_tail instead of
		 * list_insert_head on the l2ad_buflist:
		 *
		 *		LIST	l2ad_buflist		LIST
		 *		HEAD  <------ (time) ------	TAIL
		 * direction	+-----+-----+-----+-----+-----+	direction
		 * of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
		 * fill		+-----+-----+-----+-----+-----+
		 *		^				^
		 *		|				|
		 *	l2arc_feed_thread		l2arc_rebuild
		 *	will place new bufs here	restores bufs here
		 *
		 * During l2arc_rebuild() the device is not used by
		 * l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
		 */
		size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
		asize += vdev_psize_to_asize(dev->l2ad_vdev,
		    L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
		l2arc_hdr_restore(&lb->lb_entries[i], dev);
	}

	/*
	 * Record rebuild stats:
	 *	size		Logical size of restored buffers in the L2ARC
	 *	asize		Aligned size of restored buffers in the L2ARC
	 */
	ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
	ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
	ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
	ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
	ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
	ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
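
/*
 * Worked example for the stats recorded above (illustrative numbers): if a
 * restored log block occupies lb_asize = 64 KiB on the device and its
 * entries describe buffers whose aligned sizes sum to asize = 12 MiB, then
 * arcstat_l2_data_to_meta_ratio is fed 12 MiB / 64 KiB = 192, i.e. roughly
 * 192 bytes of cached data per byte of persistent L2ARC metadata.
 */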

/*
 * Restores a single ARC buf hdr from a log entry. The ARC buffer is put
 * into a state indicating that it has been evicted to L2ARC.
 */
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
	arc_buf_hdr_t *hdr, *exists;
	kmutex_t *hash_lock;
	arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
	uint64_t asize;

	/*
	 * Do all the allocation before grabbing any locks, this lets us
	 * sleep if memory is full and we don't have to deal with failed
	 * allocations.
	 */
	hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
	    dev, le->le_dva, le->le_daddr,
	    L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
	    L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
	    L2BLK_GET_PROTECTED((le)->le_prop),
	    L2BLK_GET_PREFETCH((le)->le_prop),
	    L2BLK_GET_STATE((le)->le_prop));
	asize = vdev_psize_to_asize(dev->l2ad_vdev,
	    L2BLK_GET_PSIZE((le)->le_prop));

	/*
	 * vdev_space_update() has to be called before arc_hdr_destroy() to
	 * avoid underflow since the latter also calls vdev_space_update().
	 */
	l2arc_hdr_arcstats_increment(hdr);
	vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

	mutex_enter(&dev->l2ad_mtx);
	list_insert_tail(&dev->l2ad_buflist, hdr);
	(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
	mutex_exit(&dev->l2ad_mtx);

	exists = buf_hash_insert(hdr, &hash_lock);
	if (exists) {
		/* Buffer was already cached, no need to restore it. */
		arc_hdr_destroy(hdr);
		/*
		 * If the buffer is already cached, check whether it has
		 * L2ARC metadata. If not, enter them and update the flag.
		 * This is important in case of onlining a cache device, since
		 * we previously evicted all L2ARC metadata from ARC.
		 */
		if (!HDR_HAS_L2HDR(exists)) {
			arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
			exists->b_l2hdr.b_dev = dev;
			exists->b_l2hdr.b_daddr = le->le_daddr;
			exists->b_l2hdr.b_arcs_state =
			    L2BLK_GET_STATE((le)->le_prop);
			mutex_enter(&dev->l2ad_mtx);
			list_insert_tail(&dev->l2ad_buflist, exists);
			(void) zfs_refcount_add_many(&dev->l2ad_alloc,
			    arc_hdr_size(exists), exists);
			mutex_exit(&dev->l2ad_mtx);
			l2arc_hdr_arcstats_increment(exists);
			vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
		}
		ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
	}

	mutex_exit(hash_lock);
}

/*
 * Starts an asynchronous read IO to read a log block. This is used in log
 * block reconstruction to start reading the next block before we are done
 * decoding and reconstructing the current block, to keep the l2arc device
 * nice and hot with read IO to process.
 * The returned zio will contain newly allocated memory buffers for the IO
 * data which should then be freed by the caller once the zio is no longer
 * needed (i.e. due to it having completed). If you wish to abort this
 * zio, you should do so using l2arc_log_blk_fetch_abort, which takes
 * care of disposing of the allocated buffers correctly.
 */
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
    l2arc_log_blk_phys_t *lb)
{
	uint32_t asize;
	zio_t *pio;
	l2arc_read_callback_t *cb;

	/* L2BLK_GET_PSIZE returns aligned size for log blocks */
	asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
	ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));

	cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
	cb->l2rcb_abd = abd_get_from_buf(lb, asize);
	pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
	    ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY);
	(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
	    cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL |
	    ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));

	return (pio);
}

/*
 * Aborts a zio returned from l2arc_log_blk_fetch and frees the data
 * buffers allocated for it.
 */
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
	(void) zio_wait(zio);
}

/*
 * Creates a zio to update the device header on an l2arc device.
 */
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
	const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
	abd_t *abd;
	int err;

	VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));

	l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
	l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
	l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
	l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
	l2dhdr->dh_log_entries = dev->l2ad_log_entries;
	l2dhdr->dh_evict = dev->l2ad_evict;
	l2dhdr->dh_start = dev->l2ad_start;
	l2dhdr->dh_end = dev->l2ad_end;
	l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
	l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
	l2dhdr->dh_flags = 0;
	l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
	l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
	if (dev->l2ad_first)
		l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;

	abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);

	err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
	    VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
	    NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));

	abd_free(abd);

	if (err != 0) {
		zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
		    "vdev guid: %llu", err,
		    (u_longlong_t)dev->l2ad_vdev->vdev_guid);
	}
}

/*
 * Commits a log block to the L2ARC device. This routine is invoked from
 * l2arc_write_buffers when the log block fills up.
 * This function allocates some memory to temporarily hold the serialized
 * buffer to be written. This is then released in l2arc_write_done.
 */
static uint64_t
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
	l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
	l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
	uint64_t psize, asize;
	zio_t *wzio;
	l2arc_lb_abd_buf_t *abd_buf;
	uint8_t *tmpbuf = NULL;
	l2arc_lb_ptr_buf_t *lb_ptr_buf;

	VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);

	abd_buf = zio_buf_alloc(sizeof (*abd_buf));
	abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
	lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
	lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);

	/* link the buffer into the block chain */
	lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
	lb->lb_magic = L2ARC_LOG_BLK_MAGIC;

	/*
	 * l2arc_log_blk_commit() may be called multiple times during a single
	 * l2arc_write_buffers() call. Save the allocated abd buffers in a list
	 * so we can free them in l2arc_write_done() later on.
	 */
	list_insert_tail(&cb->l2wcb_abd_list, abd_buf);

	/* try to compress the buffer */
	psize = zio_compress_data(ZIO_COMPRESS_LZ4,
	    abd_buf->abd, (void **) &tmpbuf, sizeof (*lb), 0);

	/* a log block is never entirely zero */
	ASSERT(psize != 0);
	asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
	ASSERT(asize <= sizeof (*lb));

	/*
	 * Update the start log block pointer in the device header to point
	 * to the log block we're about to write.
	 */
	l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
	l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
	l2dhdr->dh_start_lbps[0].lbp_payload_asize =
	    dev->l2ad_log_blk_payload_asize;
	l2dhdr->dh_start_lbps[0].lbp_payload_start =
	    dev->l2ad_log_blk_payload_start;
	L2BLK_SET_LSIZE(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
	L2BLK_SET_PSIZE(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
	L2BLK_SET_CHECKSUM(
	    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
	    ZIO_CHECKSUM_FLETCHER_4);
	if (asize < sizeof (*lb)) {
		/* compression succeeded */
		memset(tmpbuf + psize, 0, asize - psize);
		L2BLK_SET_COMPRESS(
		    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
		    ZIO_COMPRESS_LZ4);
	} else {
		/* compression failed */
		memcpy(tmpbuf, lb, sizeof (*lb));
		L2BLK_SET_COMPRESS(
		    (&l2dhdr->dh_start_lbps[0])->lbp_prop,
		    ZIO_COMPRESS_OFF);
	}

	/* checksum what we're about to write */
	fletcher_4_native(tmpbuf, asize, NULL,
	    &l2dhdr->dh_start_lbps[0].lbp_cksum);

	abd_free(abd_buf->abd);

	/* perform the write itself */
	abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
	abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
	wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
	    asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
	DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
	(void) zio_nowait(wzio);

	dev->l2ad_hand += asize;
	/*
	 * Include the committed log block's pointer in the list of pointers
	 * to log blocks present in the L2ARC device.
	 */
	memcpy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[0],
	    sizeof (l2arc_log_blkptr_t));
	mutex_enter(&dev->l2ad_mtx);
	list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
	ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
	ARCSTAT_BUMP(arcstat_l2_log_blk_count);
	zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
	zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
	mutex_exit(&dev->l2ad_mtx);
	vdev_space_update(dev->l2ad_vdev, asize, 0, 0);

	/* bump the kstats */
	ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
	ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
	ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
	ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
	    dev->l2ad_log_blk_payload_asize / asize);

	/* start a new log block */
	dev->l2ad_log_ent_idx = 0;
	dev->l2ad_log_blk_payload_asize = 0;
	dev->l2ad_log_blk_payload_start = 0;

	return (asize);
}

/*
 * Validates an L2ARC log block address to make sure that it can be read
 * from the provided L2ARC device.
 */
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
	/* L2BLK_GET_PSIZE returns aligned size for log blocks */
	uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
	uint64_t end = lbp->lbp_daddr + asize - 1;
	uint64_t start = lbp->lbp_payload_start;
	boolean_t evicted = B_FALSE;

	/*
	 * A log block is valid if all of the following conditions are true:
	 * - it fits entirely (including its payload) between l2ad_start and
	 *   l2ad_end
	 * - it has a valid size
	 * - neither the log block itself nor part of its payload was evicted
	 *   by l2arc_evict():
	 *
	 *              l2ad_hand          l2ad_evict
	 *              |                  |
	 *   l2ad_start ============================================ l2ad_end
	 *              --------------------------||||
	 */
	evicted =
	    l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
	    l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
	    l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
	    l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);

	return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
	    asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
	    (!evicted || dev->l2ad_first));
}

/*
 * Inserts ARC buffer header `hdr' into the current L2ARC log block on
 * the device. The buffer being inserted must be present in L2ARC.
 * Returns B_TRUE if the L2ARC log block is full and needs to be committed
 * to L2ARC, or B_FALSE if it still has room for more ARC buffers.
 */
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
	l2arc_log_blk_phys_t	*lb = &dev->l2ad_log_blk;
	l2arc_log_ent_phys_t	*le;

	if (dev->l2ad_log_entries == 0)
		return (B_FALSE);

	int index = dev->l2ad_log_ent_idx++;

	ASSERT3S(index, <, dev->l2ad_log_entries);
	ASSERT(HDR_HAS_L2HDR(hdr));

	le = &lb->lb_entries[index];
	memset(le, 0, sizeof (*le));
	le->le_dva = hdr->b_dva;
	le->le_birth = hdr->b_birth;
	le->le_daddr = hdr->b_l2hdr.b_daddr;
	if (index == 0)
		dev->l2ad_log_blk_payload_start = le->le_daddr;
	L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
	L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
	L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
	le->le_complevel = hdr->b_complevel;
	L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
	L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
	L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
	L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);

	dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
	    HDR_GET_PSIZE(hdr));

	return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
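
/*
 * Illustrative caller sketch (not verbatim upstream code; names taken from
 * the surrounding functions): per the comment above l2arc_log_blk_commit(),
 * l2arc_write_buffers() is expected to append each written header and commit
 * the log block once l2arc_log_blk_insert() reports it is full, roughly:
 *
 *	if (dev->l2ad_log_entries != 0 &&
 *	    l2arc_log_blk_insert(dev, hdr))
 *		(void) l2arc_log_blk_commit(dev, pio, cb);
 */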

/*
 * Checks whether a given L2ARC device address sits in a time-sequential
 * range. The trick here is that the L2ARC is a rotary buffer, so we can't
 * just do a range comparison, we need to handle the situation in which the
 * range wraps around the end of the L2ARC device. Arguments:
 *	bottom -- Lower end of the range to check (written to earlier).
 *	top    -- Upper end of the range to check (written to later).
 *	check  -- The address for which we want to determine if it sits in
 *		  between the top and bottom.
 *
 * The 3-way conditional below represents the following cases:
 *
 *	bottom < top : Sequentially ordered case:
 *	  <check>--------+-------------------+
 *			 |  (overlap here?)  |
 *			 V		     V
 *	 |---------------<bottom>============<top>--------------|
 *
 *	bottom > top : Looped-around case:
 *			      <check>--------+------------------+
 *					     |  (overlap here?)  |
 *					     V			 V
 *	 |===============<top>---------------<bottom>===========|
 *	 ^
 *	 |
 *	 +---------------+---------<check>
 *
 *	top == bottom : Just a single address comparison.
 */
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
	if (bottom < top)
		return (bottom <= check && check <= top);
	else if (bottom > top)
		return (check <= top || bottom <= check);
	else
		return (check == top);
}
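
/*
 * Worked example (hypothetical offsets, not part of the original code):
 * with bottom = 900 and top = 100 the range has wrapped around the end of
 * the device, so l2arc_range_check_overlap(900, 100, 950) and
 * l2arc_range_check_overlap(900, 100, 50) both return B_TRUE, while
 * l2arc_range_check_overlap(900, 100, 500) returns B_FALSE.
 */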

EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
	spl_param_get_u64, ZMOD_RW, "Minimum ARC size in bytes");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
	spl_param_get_u64, ZMOD_RW, "Maximum ARC size in bytes");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_balance, UINT, ZMOD_RW,
	"Balance between metadata and data on ghost hits.");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
	param_get_uint, ZMOD_RW, "Seconds before growing ARC size");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
	param_get_uint, ZMOD_RW, "log2(fraction of ARC to reclaim)");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
	"Percent of pagecache to reclaim ARC to");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, UINT, ZMOD_RD,
	"Target average block size");

ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
	"Disable compressed ARC buffers");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
	param_get_uint, ZMOD_RW, "Min life of prefetch block in ms");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
	param_set_arc_int, param_get_uint, ZMOD_RW,
	"Min life of prescient prefetched block in ms");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, U64, ZMOD_RW,
	"Max write bytes per interval");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, U64, ZMOD_RW,
	"Extra write bytes during device warmup");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, U64, ZMOD_RW,
	"Number of max device writes to precache");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, U64, ZMOD_RW,
	"Compressed l2arc_headroom multiplier");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, U64, ZMOD_RW,
	"TRIM ahead L2ARC write size multiplier");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, U64, ZMOD_RW,
	"Seconds between L2ARC writing");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, U64, ZMOD_RW,
	"Min feed interval in milliseconds");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
	"Skip caching prefetched buffers");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
	"Turbo L2ARC warmup");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
	"No reads during writes");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, UINT, ZMOD_RW,
	"Percent of ARC size allowed for L2ARC-only headers");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
	"Rebuild the L2ARC when importing a pool");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, U64, ZMOD_RW,
	"Min size in bytes to write rebuild log blocks in L2ARC");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
	"Cache only MFU data from ARC into L2ARC");

ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, exclude_special, INT, ZMOD_RW,
	"Exclude dbufs on special vdevs from being cached to L2ARC if set.");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
	param_get_uint, ZMOD_RW, "System free memory I/O throttle in bytes");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_u64,
	spl_param_get_u64, ZMOD_RW, "System free memory target size in bytes");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_u64,
	spl_param_get_u64, ZMOD_RW, "Minimum bytes of dnodes in ARC");

ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
	param_set_arc_int, param_get_uint, ZMOD_RW,
	"Percent of ARC meta buffers for dnodes");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, UINT, ZMOD_RW,
	"Percentage of excess dnodes to try to unpin");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, UINT, ZMOD_RW,
	"When full, ARC allocation waits for eviction of this % of alloc size");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, UINT, ZMOD_RW,
	"The number of headers to evict per sublist before moving to the next");

ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, prune_task_threads, INT, ZMOD_RW,
	"Number of arc_prune threads");