/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
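
/*
 * Illustrative sketch (not part of the ARC implementation): the
 * variable-block-size eviction described in point 3 above amounts to
 * accumulating evictable blocks until the space freed approximates the
 * space the new block needs.  The type and helper names here (block_t,
 * next_evictable(), evict_one_block()) are hypothetical, for exposition
 * only.
 */
#if 0	/* example only, never compiled */
static uint64_t
evict_approximate(uint64_t spc_needed)
{
	uint64_t spc_freed = 0;

	/* Evict the "lowest" evictable blocks until enough space frees up. */
	while (spc_freed < spc_needed) {
		block_t *blk = next_evictable();	/* hypothetical */
		if (blk == NULL)
			break;		/* no space: cache throttle engages */
		spc_freed += blk->size;
		evict_one_block(blk);			/* hypothetical */
	}
	return (spc_freed);
}
#endif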

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal ARC algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * ARC list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each ARC state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an ARC list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * It is also possible to register a callback which is run when the
 * arc_meta_limit is reached and no buffers can be safely evicted.  In
 * this case the arc user should drop a reference on some arc buffers so
 * they can be reclaimed and the arc_meta_limit honored.  For example,
 * when using the ZPL each dentry holds a reference on a znode.  These
 * dentries must be pruned before the arc buffer holding the znode can
 * be safely evicted.
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
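
/*
 * Illustrative sketch (not from this file verbatim): taking a hash
 * table lock while an ARC list lock is held must use mutex_tryenter(),
 * as described above, backing off on failure rather than blocking.
 */
#if 0	/* example only, never compiled */
static boolean_t
try_lock_hdr_under_list_lock(arc_buf_hdr_t *hdr)
{
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	/* A blocking mutex_enter() here could deadlock with arc_read(). */
	if (!mutex_tryenter(hash_lock))
		return (B_FALSE);	/* caller retries or skips this hdr */
	/* ... inspect or modify the header ... */
	mutex_exit(hash_lock);
	return (B_TRUE);
}
#endif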

/*
 * ARC operation:
 *
 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
 * This structure can point either to a block that is still in the cache or to
 * one that is only accessible in an L2 ARC device, or it can provide
 * information about a block that was recently evicted. If a block is
 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
 * information to retrieve it from the L2ARC device. This information is
 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. The data
 * of a block in this state cannot be accessed directly.
 *
 * Blocks that are actively being referenced or have not been evicted
 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
 * the arc_buf_hdr_t that will point to the data block in memory. A block can
 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
 *
 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the
 * ability to store the physical data (b_pabd) associated with the DVA of the
 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
 * it will match its on-disk compression characteristics. This behavior can be
 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
 * compressed ARC functionality is disabled, the b_pabd will point to an
 * uncompressed version of the on-disk data.
 *
 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each
 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
 * consumer. The ARC will provide references to this data and will keep it
 * cached until it is no longer in use. The ARC caches only the L1ARC's
 * physical data block and will evict any arc_buf_t that is no longer
 * referenced. The amount of memory consumed by the arc_buf_ts' data buffers
 * can be seen via the "overhead_size" kstat.
 *
 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or
 * compressed form. The typical case is that consumers will want uncompressed
 * data, and when that happens a new data buffer is allocated where the data is
 * decompressed for them to use. Currently the only consumer who wants
 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it
 * exists on disk. When this happens, the arc_buf_t's data buffer is shared
 * with the arc_buf_hdr_t.
 *
 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's.
 * The first one is owned by a compressed send consumer (and therefore
 * references the same compressed data buffer as the arc_buf_hdr_t) and the
 * second could be used by any other consumer (and has its own uncompressed
 * copy of the data buffer).
 *
 *   arc_buf_hdr_t
 *   +-----------+
 *   | fields    |
 *   | common to |
 *   | L1- and   |
 *   | L2ARC     |
 *   +-----------+
 *   | l2arc_buf_hdr_t
 *   |           |
 *   +-----------+
 *   | l1arc_buf_hdr_t
 *   |           |              arc_buf_t
 *   | b_buf     +------------>+-----------+      arc_buf_t
 *   | b_pabd    +-+           |b_next     +---->+-----------+
 *   +-----------+ |           |-----------|     |b_next     +-->NULL
 *                 |           |b_comp = T |     +-----------+
 *                 |           |b_data     +-+   |b_comp = F |
 *                 |           +-----------+ |   |b_data     +-+
 *                 +->+------+               |   +-----------+ |
 *        compressed  |      |               |                 |
 *           data     |      |<--------------+                 | uncompressed
 *                    +------+  compressed,                    |    data
 *                               shared                        +-->+------+
 *                                data                             |      |
 *                                                                 |      |
 *                                                                 +------+
 *
 * When a consumer reads a block, the ARC must first look to see if the
 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
 * arc_buf_t and either copies uncompressed data into a new data buffer from an
 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
 * hdr is compressed and the desired compression characteristics of the
 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
 * the last buffer in the hdr's b_buf list, however a shared compressed buf can
 * be anywhere in the hdr's list.
 *
 * The diagram below shows an example of an uncompressed ARC hdr that is
 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is
 * the last element in the buf list):
 *
 *                arc_buf_hdr_t
 *                +-----------+
 *                |           |
 *                |           |
 *                |           |
 *                +-----------+
 * l2arc_buf_hdr_t|           |
 *                |           |
 *                +-----------+
 * l1arc_buf_hdr_t|           |
 *                |           |                 arc_buf_t    (shared)
 *                |    b_buf  +------------>+---------+      arc_buf_t
 *                |           |             |b_next   +---->+---------+
 *                |  b_pabd   +-+           |---------|     |b_next   +-->NULL
 *                +-----------+ |           |         |     +---------+
 *                              |           |b_data   +-+   |         |
 *                              |           +---------+ |   |b_data   +-+
 *                              +->+------+             |   +---------+ |
 *                                 |      |             |               |
 *                   uncompressed  |      |             |               |
 *                        data     +------+             |               |
 *                                    ^                 +->+------+     |
 *                                    |       uncompressed |      |     |
 *                                    |           data     |      |     |
 *                                    |                    +------+     |
 *                                    +---------------------------------+
 *
 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd
 * since the physical block is about to be rewritten. The new data contents
 * will be contained in the arc_buf_t. As the I/O pipeline performs the write,
 * it may compress the data before writing it to disk. The ARC will be called
 * with the transformed data and will bcopy the transformed on-disk block into
 * a newly allocated b_pabd. Writes are always done into buffers which have
 * either been loaned (and hence are new and don't have other readers) or
 * buffers which have been released (and hence have their own hdr, if there
 * were originally other readers of the buf's original hdr). This ensures that
 * the ARC only needs to update a single buf and its hdr after a write occurs.
 *
 * When the L2ARC is in use, it will also take advantage of the b_pabd. The
 * L2ARC will always write the contents of b_pabd to the L2ARC. This means
 * that when compressed ARC is enabled the L2ARC blocks are identical
 * to the on-disk block in the main data pool. This provides a significant
 * advantage since the ARC can leverage the bp's checksum when reading from the
 * L2ARC to determine if the contents are valid. However, if the compressed
 * ARC is disabled, then the L2ARC's block must be transformed to look
 * like the physical block in the main data pool before comparing the
 * checksum and determining its validity.
 *
 * The L1ARC has a slightly different system for storing encrypted data.
 * Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
 * possible to decrypt encrypted data (or vice versa) if the keys aren't
 * loaded. The other difference is that encryption cannot be treated as a
 * suggestion. If a caller would prefer compressed data, but they actually wind
 * up with uncompressed data the worst thing that could happen is there might
 * be a performance hit. If the caller requests encrypted data, however, we
 * must be sure they actually get it or else secret information could be
 * leaked. Raw data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header,
 * therefore, may have both an encrypted version and a decrypted version of its
 * data at once. When a caller needs a raw arc_buf_t, it is allocated and the
 * data is copied out of this header. To avoid complications with b_pabd, raw
 * buffers cannot be shared.
 */
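
/*
 * Illustrative sketch (not part of this file): the per-consumer fill
 * decision described above -- share the hdr's compressed b_pabd,
 * decompress into a new buffer, or copy/share uncompressed data.  The
 * helper names are hypothetical; only HDR_GET_COMPRESS() and
 * ZIO_COMPRESS_OFF are real.
 */
#if 0	/* example only, never compiled */
static void
fill_buf_sketch(arc_buf_hdr_t *hdr, arc_buf_t *buf, boolean_t want_compressed)
{
	if (want_compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
		/* e.g. "zfs send": reference the on-disk-format bytes */
		share_with_hdr(buf, hdr);		/* hypothetical */
	} else if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) {
		decompress_into(buf, hdr);		/* hypothetical */
	} else {
		copy_or_share_uncompressed(buf, hdr);	/* hypothetical */
	}
}
#endif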
277 | ||
34dc7c2f BB |
278 | #include <sys/spa.h> |
279 | #include <sys/zio.h> | |
d3c2ae1c | 280 | #include <sys/spa_impl.h> |
3a17a7a9 | 281 | #include <sys/zio_compress.h> |
d3c2ae1c | 282 | #include <sys/zio_checksum.h> |
34dc7c2f BB |
283 | #include <sys/zfs_context.h> |
284 | #include <sys/arc.h> | |
36da08ef | 285 | #include <sys/refcount.h> |
b128c09f | 286 | #include <sys/vdev.h> |
9babb374 | 287 | #include <sys/vdev_impl.h> |
e8b96c60 | 288 | #include <sys/dsl_pool.h> |
a6255b7f | 289 | #include <sys/zio_checksum.h> |
ca0bf58d | 290 | #include <sys/multilist.h> |
a6255b7f | 291 | #include <sys/abd.h> |
b5256303 TC |
292 | #include <sys/zil.h> |
293 | #include <sys/fm/fs/zfs.h> | |
34dc7c2f BB |
294 | #ifdef _KERNEL |
295 | #include <sys/vmsystm.h> | |
296 | #include <vm/anon.h> | |
297 | #include <sys/fs/swapnode.h> | |
ab26409d | 298 | #include <sys/zpl.h> |
aaed7c40 | 299 | #include <linux/mm_compat.h> |
34dc7c2f BB |
300 | #endif |
301 | #include <sys/callb.h> | |
302 | #include <sys/kstat.h> | |
570827e1 | 303 | #include <sys/dmu_tx.h> |
428870ff | 304 | #include <zfs_fletcher.h> |
59ec819a | 305 | #include <sys/arc_impl.h> |
49ee64e5 | 306 | #include <sys/trace_arc.h> |
34dc7c2f | 307 | |
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif

static kmutex_t arc_reclaim_lock;
static kcondvar_t arc_reclaim_thread_cv;
static boolean_t arc_reclaim_thread_exit;
static kcondvar_t arc_reclaim_waiters_cv;

/*
 * The number of headers to evict in arc_evict_state_impl() before
 * dropping the sublist lock and evicting from another sublist. A lower
 * value means we're more likely to evict the "correct" header (i.e. the
 * oldest header in the arc state), but comes with higher overhead
 * (i.e. more invocations of arc_evict_state_impl()).
 */
int zfs_arc_evict_batch_limit = 10;

/* number of seconds before growing cache again */
static int arc_grow_retry = 5;

/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;

/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
static uint_t zfs_arc_pc_percent = 0;
#endif

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. if there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;


/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * log2 fraction of the zio arena to keep free.
 */
int arc_zio_arena_free_shift = 2;

/*
 * These tunables are for performance analysis.
 */
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
unsigned long zfs_arc_dnode_limit = 0;
unsigned long zfs_arc_dnode_reduce_percent = 10;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

int zfs_compressed_arc_enabled = B_TRUE;

/*
 * ARC will evict meta buffers that exceed arc_meta_limit. This
 * tunable makes arc_meta_limit adjustable for different workloads.
 */
unsigned long zfs_arc_meta_limit_percent = 75;

/*
 * Percentage that can be consumed by dnodes of ARC meta buffers.
 */
unsigned long zfs_arc_dnode_limit_percent = 10;

/*
 * These tunables are Linux specific
 */
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_lifespan = 0;
int zfs_arc_p_aggressive_disable = 1;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread.  The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	/*
	 * Number of times arc_evict_state() was unable to evict enough
	 * buffers to reach its target amount.
	 */
	kstat_named_t arcstat_evict_not_enough;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_evict_l2_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	/*
	 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
	 * Note that the compressed bytes may match the uncompressed bytes
	 * if the block is either not compressed or compressed arc is disabled.
	 */
	kstat_named_t arcstat_compressed_size;
	/*
	 * Uncompressed size of the data stored in b_pabd. If compressed
	 * arc is disabled then this value will be identical to the stat
	 * above.
	 */
	kstat_named_t arcstat_uncompressed_size;
	/*
	 * Number of bytes stored in all the arc_buf_t's. This is classified
	 * as "overhead" since this data is typically short-lived and will
	 * be evicted from the arc when it becomes unreferenced unless the
	 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
	 * values have been set (see comment in dbuf.c for more information).
	 */
	kstat_named_t arcstat_overhead_size;
	/*
	 * Number of bytes consumed by internal ARC structures necessary
	 * for tracking purposes; these structures are not actually
	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
	 * caches), and arc_buf_t structures (allocated via arc_buf_t
	 * cache).
	 */
	kstat_named_t arcstat_hdr_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
	 * on disk user data (e.g. plain file contents).
	 */
	kstat_named_t arcstat_data_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_METADATA. This is generally consumed by buffers
	 * backing on disk data that is used for internal ZFS
	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
	 */
	kstat_named_t arcstat_metadata_size;
	/*
	 * Number of bytes consumed by dmu_buf_impl_t objects.
	 */
	kstat_named_t arcstat_dbuf_size;
	/*
	 * Number of bytes consumed by dnode_t objects.
	 */
	kstat_named_t arcstat_dnode_size;
	/*
	 * Number of bytes consumed by bonus buffers.
	 */
	kstat_named_t arcstat_bonus_size;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_anon state. This includes *all* buffers in the arc_anon
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_anon_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mru state. This includes *all* buffers in the arc_mru
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mru_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mru_ghost state. The key thing to note
	 * here, is the fact that this size doesn't actually indicate
	 * RAM consumption. The ghost lists only consist of headers and
	 * don't actually have ARC buffers linked off of these headers.
	 * Thus, *if* the headers had associated ARC buffers, these
	 * buffers *would have* consumed this number of bytes.
	 */
	kstat_named_t arcstat_mru_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mfu state. This includes *all* buffers in the arc_mfu
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mfu_size;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
	 * state.
	 */
	kstat_named_t arcstat_mfu_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_METADATA, and reside in the
	 * arc_mfu state.
	 */
	kstat_named_t arcstat_mfu_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mfu_ghost state. See the comment above
	 * arcstat_mru_ghost_size for more details.
	 */
	kstat_named_t arcstat_mfu_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_lock_retry;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_evict_l1cached;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_lsize;
	kstat_named_t arcstat_l2_psize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_memory_direct_count;
	kstat_named_t arcstat_memory_indirect_count;
	kstat_named_t arcstat_memory_all_bytes;
	kstat_named_t arcstat_memory_free_bytes;
	kstat_named_t arcstat_memory_available_bytes;
	kstat_named_t arcstat_no_grow;
	kstat_named_t arcstat_tempreserve;
	kstat_named_t arcstat_loaned_bytes;
	kstat_named_t arcstat_prune;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_dnode_limit;
	kstat_named_t arcstat_meta_max;
	kstat_named_t arcstat_meta_min;
	kstat_named_t arcstat_sync_wait_for_async;
	kstat_named_t arcstat_demand_hit_predictive_prefetch;
	kstat_named_t arcstat_need_free;
	kstat_named_t arcstat_sys_free;
	kstat_named_t arcstat_raw_size;
} arc_stats_t;

672 | { "hits", KSTAT_DATA_UINT64 }, | |
673 | { "misses", KSTAT_DATA_UINT64 }, | |
674 | { "demand_data_hits", KSTAT_DATA_UINT64 }, | |
675 | { "demand_data_misses", KSTAT_DATA_UINT64 }, | |
676 | { "demand_metadata_hits", KSTAT_DATA_UINT64 }, | |
677 | { "demand_metadata_misses", KSTAT_DATA_UINT64 }, | |
678 | { "prefetch_data_hits", KSTAT_DATA_UINT64 }, | |
679 | { "prefetch_data_misses", KSTAT_DATA_UINT64 }, | |
680 | { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, | |
681 | { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, | |
682 | { "mru_hits", KSTAT_DATA_UINT64 }, | |
683 | { "mru_ghost_hits", KSTAT_DATA_UINT64 }, | |
684 | { "mfu_hits", KSTAT_DATA_UINT64 }, | |
685 | { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, | |
686 | { "deleted", KSTAT_DATA_UINT64 }, | |
34dc7c2f BB |
687 | { "mutex_miss", KSTAT_DATA_UINT64 }, |
688 | { "evict_skip", KSTAT_DATA_UINT64 }, | |
ca0bf58d | 689 | { "evict_not_enough", KSTAT_DATA_UINT64 }, |
428870ff BB |
690 | { "evict_l2_cached", KSTAT_DATA_UINT64 }, |
691 | { "evict_l2_eligible", KSTAT_DATA_UINT64 }, | |
692 | { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, | |
ca0bf58d | 693 | { "evict_l2_skip", KSTAT_DATA_UINT64 }, |
34dc7c2f BB |
694 | { "hash_elements", KSTAT_DATA_UINT64 }, |
695 | { "hash_elements_max", KSTAT_DATA_UINT64 }, | |
696 | { "hash_collisions", KSTAT_DATA_UINT64 }, | |
697 | { "hash_chains", KSTAT_DATA_UINT64 }, | |
698 | { "hash_chain_max", KSTAT_DATA_UINT64 }, | |
699 | { "p", KSTAT_DATA_UINT64 }, | |
700 | { "c", KSTAT_DATA_UINT64 }, | |
701 | { "c_min", KSTAT_DATA_UINT64 }, | |
702 | { "c_max", KSTAT_DATA_UINT64 }, | |
703 | { "size", KSTAT_DATA_UINT64 }, | |
d3c2ae1c GW |
704 | { "compressed_size", KSTAT_DATA_UINT64 }, |
705 | { "uncompressed_size", KSTAT_DATA_UINT64 }, | |
706 | { "overhead_size", KSTAT_DATA_UINT64 }, | |
34dc7c2f | 707 | { "hdr_size", KSTAT_DATA_UINT64 }, |
d164b209 | 708 | { "data_size", KSTAT_DATA_UINT64 }, |
500445c0 | 709 | { "metadata_size", KSTAT_DATA_UINT64 }, |
25458cbe TC |
710 | { "dbuf_size", KSTAT_DATA_UINT64 }, |
711 | { "dnode_size", KSTAT_DATA_UINT64 }, | |
712 | { "bonus_size", KSTAT_DATA_UINT64 }, | |
13be560d | 713 | { "anon_size", KSTAT_DATA_UINT64 }, |
500445c0 PS |
714 | { "anon_evictable_data", KSTAT_DATA_UINT64 }, |
715 | { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, | |
13be560d | 716 | { "mru_size", KSTAT_DATA_UINT64 }, |
500445c0 PS |
717 | { "mru_evictable_data", KSTAT_DATA_UINT64 }, |
718 | { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, | |
13be560d | 719 | { "mru_ghost_size", KSTAT_DATA_UINT64 }, |
500445c0 PS |
720 | { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, |
721 | { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, | |
13be560d | 722 | { "mfu_size", KSTAT_DATA_UINT64 }, |
500445c0 PS |
723 | { "mfu_evictable_data", KSTAT_DATA_UINT64 }, |
724 | { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, | |
13be560d | 725 | { "mfu_ghost_size", KSTAT_DATA_UINT64 }, |
500445c0 PS |
726 | { "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, |
727 | { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, | |
34dc7c2f BB |
728 | { "l2_hits", KSTAT_DATA_UINT64 }, |
729 | { "l2_misses", KSTAT_DATA_UINT64 }, | |
730 | { "l2_feeds", KSTAT_DATA_UINT64 }, | |
731 | { "l2_rw_clash", KSTAT_DATA_UINT64 }, | |
d164b209 BB |
732 | { "l2_read_bytes", KSTAT_DATA_UINT64 }, |
733 | { "l2_write_bytes", KSTAT_DATA_UINT64 }, | |
34dc7c2f BB |
734 | { "l2_writes_sent", KSTAT_DATA_UINT64 }, |
735 | { "l2_writes_done", KSTAT_DATA_UINT64 }, | |
736 | { "l2_writes_error", KSTAT_DATA_UINT64 }, | |
ca0bf58d | 737 | { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, |
34dc7c2f BB |
738 | { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, |
739 | { "l2_evict_reading", KSTAT_DATA_UINT64 }, | |
b9541d6b | 740 | { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, |
34dc7c2f BB |
741 | { "l2_free_on_write", KSTAT_DATA_UINT64 }, |
742 | { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, | |
743 | { "l2_cksum_bad", KSTAT_DATA_UINT64 }, | |
744 | { "l2_io_error", KSTAT_DATA_UINT64 }, | |
745 | { "l2_size", KSTAT_DATA_UINT64 }, | |
3a17a7a9 | 746 | { "l2_asize", KSTAT_DATA_UINT64 }, |
34dc7c2f | 747 | { "l2_hdr_size", KSTAT_DATA_UINT64 }, |
1834f2d8 | 748 | { "memory_throttle_count", KSTAT_DATA_UINT64 }, |
7cb67b45 BB |
749 | { "memory_direct_count", KSTAT_DATA_UINT64 }, |
750 | { "memory_indirect_count", KSTAT_DATA_UINT64 }, | |
70f02287 BB |
751 | { "memory_all_bytes", KSTAT_DATA_UINT64 }, |
752 | { "memory_free_bytes", KSTAT_DATA_UINT64 }, | |
753 | { "memory_available_bytes", KSTAT_DATA_INT64 }, | |
1834f2d8 BB |
754 | { "arc_no_grow", KSTAT_DATA_UINT64 }, |
755 | { "arc_tempreserve", KSTAT_DATA_UINT64 }, | |
756 | { "arc_loaned_bytes", KSTAT_DATA_UINT64 }, | |
ab26409d | 757 | { "arc_prune", KSTAT_DATA_UINT64 }, |
1834f2d8 BB |
758 | { "arc_meta_used", KSTAT_DATA_UINT64 }, |
759 | { "arc_meta_limit", KSTAT_DATA_UINT64 }, | |
25458cbe | 760 | { "arc_dnode_limit", KSTAT_DATA_UINT64 }, |
1834f2d8 | 761 | { "arc_meta_max", KSTAT_DATA_UINT64 }, |
11f552fa | 762 | { "arc_meta_min", KSTAT_DATA_UINT64 }, |
7f60329a MA |
763 | { "sync_wait_for_async", KSTAT_DATA_UINT64 }, |
764 | { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 }, | |
11f552fa | 765 | { "arc_need_free", KSTAT_DATA_UINT64 }, |
b5256303 TC |
766 | { "arc_sys_free", KSTAT_DATA_UINT64 }, |
767 | { "arc_raw_size", KSTAT_DATA_UINT64 } | |
34dc7c2f BB |
768 | }; |
769 | ||
#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

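/*
 * Usage sketch: the hit/miss accounting in this file expands
 * ARCSTAT_CONDSTAT along the demand/prefetch and data/metadata axes,
 * e.g. (as used in the read path):
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits, or
 * arcstat_prefetch_metadata_hits.
 */
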
kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define	arc_no_grow	ARCSTAT(arcstat_no_grow) /* do not grow cache size */
#define	arc_tempreserve	ARCSTAT(arcstat_tempreserve)
#define	arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define	arc_dnode_limit	ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define	arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define	arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
#define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
#define	arc_dbuf_size	ARCSTAT(arcstat_dbuf_size) /* dbuf metadata */
#define	arc_dnode_size	ARCSTAT(arcstat_dnode_size) /* dnode metadata */
#define	arc_bonus_size	ARCSTAT(arcstat_bonus_size) /* bonus buffer metadata */
#define	arc_need_free	ARCSTAT(arcstat_need_free) /* bytes to be freed */
#define	arc_sys_free	ARCSTAT(arcstat_sys_free) /* target system free bytes */

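/*
 * Usage note (illustrative): because these are aliases into arc_stats,
 * an ordinary-looking assignment such as
 *
 *	arc_c = arc_c_max;
 *
 * updates the exported kstat directly; no separate shadow copy ever
 * needs to be kept in sync.
 */
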
/* size of all b_rabd's in entire arc */
#define	arc_raw_size	ARCSTAT(arcstat_raw_size)
/* compressed size of entire arc */
#define	arc_compressed_size	ARCSTAT(arcstat_compressed_size)
/* uncompressed size of entire arc */
#define	arc_uncompressed_size	ARCSTAT(arcstat_uncompressed_size)
/* number of bytes in the arc from arc_buf_t's */
#define	arc_overhead_size	ARCSTAT(arcstat_overhead_size)

static list_t arc_prune_list;
static kmutex_t arc_prune_mtx;
static taskq_t *arc_prune_taskq;

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define	HDR_COMPRESSION_ENABLED(hdr)	\
	((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)

#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define	HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define	HDR_PROTECTED(hdr)	((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define	HDR_NOAUTH(hdr)		((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define	HDR_SHARED_DATA(hdr)	((hdr)->b_flags & ARC_FLAG_SHARED_DATA)

#define	HDR_ISTYPE_METADATA(hdr)	\
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define	HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))

#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define	HDR_HAS_RABD(hdr)	\
	(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) &&	\
	(hdr)->b_crypt_hdr.b_rabd != NULL)
#define	HDR_ENCRYPTED(hdr)	\
	(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define	HDR_AUTHENTICATED(hdr)	\
	(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))

/* For storing compression mode in b_flags */
#define	HDR_COMPRESS_OFFSET	(highbit64(ARC_FLAG_COMPRESS_0) - 1)

#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define	HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
	HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));

#define	ARC_BUF_LAST(buf)	((buf)->b_next == NULL)
#define	ARC_BUF_SHARED(buf)	((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define	ARC_BUF_COMPRESSED(buf)	((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define	ARC_BUF_ENCRYPTED(buf)	((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)

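/*
 * Usage sketch: the HDR_* predicates guard access to the optional
 * sub-structures.  For example, code must check HDR_HAS_L1HDR() before
 * touching hdr->b_l1hdr, since an L2-only header is allocated without
 * that portion (see HDR_L2ONLY_SIZE below).
 */
#if 0	/* example only, never compiled */
	if (HDR_HAS_L1HDR(hdr) && !HDR_IO_IN_PROGRESS(hdr))
		buf = hdr->b_l1hdr.b_buf;	/* safe: L1 portion exists */
#endif
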
/*
 * Other sizes
 */

#define	HDR_FULL_CRYPT_SIZE	((int64_t)sizeof (arc_buf_hdr_t))
#define	HDR_FULL_SIZE	((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define	HDR_L2ONLY_SIZE	((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))

/*
 * Hash table routines
 */

#define	HT_LOCK_ALIGN	64
#define	HT_LOCK_PAD	(P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[HT_LOCK_PAD];
#endif
};

#define	BUF_LOCKS 8192
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */

/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
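/*
 * Illustrative arithmetic (assuming the feed logic scales the headroom
 * by this percentage): with l2arc_headroom = 2 and a 200% boost, a scan
 * that saw compressible buffers reads ahead 2 * 200 / 100 = 4 writes'
 * worth of headroom on the next cycle.
 */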
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

/*
 * We can feed L2ARC from two states of ARC buffers, mru and mfu,
 * and each of these states has two types: data and metadata.
 */
#define	L2ARC_FEED_TYPES	4

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE;	/* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM;		/* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
int l2arc_noprefetch = B_TRUE;			/* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE;			/* turbo warmup */
int l2arc_norw = B_FALSE;			/* no reads during writes */

/*
 * L2ARC Internals
 */
static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

991 | ||
992 | typedef struct l2arc_read_callback { | |
2aa34383 | 993 | arc_buf_hdr_t *l2rcb_hdr; /* read header */ |
3a17a7a9 | 994 | blkptr_t l2rcb_bp; /* original blkptr */ |
5dbd68a3 | 995 | zbookmark_phys_t l2rcb_zb; /* original bookmark */ |
3a17a7a9 | 996 | int l2rcb_flags; /* original flags */ |
82710e99 | 997 | abd_t *l2rcb_abd; /* temporary buffer */ |
34dc7c2f BB |
998 | } l2arc_read_callback_t; |
999 | ||
34dc7c2f BB |
1000 | typedef struct l2arc_data_free { |
1001 | /* protected by l2arc_free_on_write_mtx */ | |
a6255b7f | 1002 | abd_t *l2df_abd; |
34dc7c2f | 1003 | size_t l2df_size; |
d3c2ae1c | 1004 | arc_buf_contents_t l2df_type; |
34dc7c2f BB |
1005 | list_node_t l2df_list_node; |
1006 | } l2arc_data_free_t; | |
1007 | ||
b5256303 TC |
1008 | typedef enum arc_fill_flags { |
1009 | ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */ | |
1010 | ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */ | |
1011 | ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */ | |
1012 | ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */ | |
1013 | ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */ | |
1014 | } arc_fill_flags_t; | |
1015 | ||
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, boolean_t);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing(void);
static void arc_buf_watch(arc_buf_t *);
static void arc_tuning_update(void);
static void arc_prune_async(int64_t);
static uint64_t arc_all_memory(void);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	HDR_EMPTY(hdr)						\
	((hdr)->b_dva.dva_word[0] == 0 &&			\
	(hdr)->b_dva.dva_word[1] == 0)

#define	HDR_EQUAL(spa, dva, birth, hdr)				\
	((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
	const dva_t *dva = BP_IDENTITY(bp);
	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *hdr;

	mutex_enter(hash_lock);
	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
	    hdr = hdr->b_hash_next) {
		if (HDR_EQUAL(spa, dva, birth, hdr)) {
			*lockp = hash_lock;
			return (hdr);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

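/*
 * Illustrative caller pattern (not from this file verbatim): on a hit,
 * buf_hash_find() returns with the hash lock held, so the caller owns
 * the unlock; on a miss, *lockp is NULL and nothing is held.  The
 * "guid" variable is a hypothetical spa load guid.
 */
#if 0	/* example only, never compiled */
	kmutex_t *hash_lock;
	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);

	if (hdr != NULL) {
		/* ... operate on hdr while the hash lock is held ... */
		mutex_exit(hash_lock);
	}
#endif
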
1099 | /* | |
1100 | * Insert an entry into the hash table. If there is already an element | |
1101 | * equal to elem in the hash table, then the already existing element | |
1102 | * will be returned and the new element will not be inserted. | |
1103 | * Otherwise returns NULL. | |
b9541d6b | 1104 | * If lockp == NULL, the caller is assumed to already hold the hash lock. |
34dc7c2f BB |
1105 | */ |
1106 | static arc_buf_hdr_t * | |
2a432414 | 1107 | buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) |
34dc7c2f | 1108 | { |
2a432414 | 1109 | uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); |
34dc7c2f | 1110 | kmutex_t *hash_lock = BUF_HASH_LOCK(idx); |
2a432414 | 1111 | arc_buf_hdr_t *fhdr; |
34dc7c2f BB |
1112 | uint32_t i; |
1113 | ||
2a432414 GW |
1114 | ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); |
1115 | ASSERT(hdr->b_birth != 0); | |
1116 | ASSERT(!HDR_IN_HASH_TABLE(hdr)); | |
b9541d6b CW |
1117 | |
1118 | if (lockp != NULL) { | |
1119 | *lockp = hash_lock; | |
1120 | mutex_enter(hash_lock); | |
1121 | } else { | |
1122 | ASSERT(MUTEX_HELD(hash_lock)); | |
1123 | } | |
1124 | ||
2a432414 GW |
1125 | for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; |
1126 | fhdr = fhdr->b_hash_next, i++) { | |
d3c2ae1c | 1127 | if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) |
2a432414 | 1128 | return (fhdr); |
34dc7c2f BB |
1129 | } |
1130 | ||
2a432414 GW |
1131 | hdr->b_hash_next = buf_hash_table.ht_table[idx]; |
1132 | buf_hash_table.ht_table[idx] = hdr; | |
d3c2ae1c | 1133 | arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); |
34dc7c2f BB |
1134 | |
1135 | /* collect some hash table performance data */ | |
1136 | if (i > 0) { | |
1137 | ARCSTAT_BUMP(arcstat_hash_collisions); | |
1138 | if (i == 1) | |
1139 | ARCSTAT_BUMP(arcstat_hash_chains); | |
1140 | ||
1141 | ARCSTAT_MAX(arcstat_hash_chain_max, i); | |
1142 | } | |
1143 | ||
1144 | ARCSTAT_BUMP(arcstat_hash_elements); | |
1145 | ARCSTAT_MAXSTAT(arcstat_hash_elements); | |
1146 | ||
1147 | return (NULL); | |
1148 | } | |
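/*
 * Illustrative find-or-insert pattern (a sketch of how callers such as
 * arc_read() combine buf_hash_find() and buf_hash_insert(); the
 * function below is hypothetical and not part of the ARC). Both
 * helpers return with *lockp held on success, so the caller drops the
 * lock exactly once:
 */
#if 0
static arc_buf_hdr_t *
example_find_or_insert(uint64_t spa, const blkptr_t *bp, arc_buf_hdr_t *new)
{
	kmutex_t *hash_lock;
	arc_buf_hdr_t *hdr = buf_hash_find(spa, bp, &hash_lock);

	if (hdr == NULL) {
		/* miss: try to insert, racing with concurrent readers */
		hdr = buf_hash_insert(new, &hash_lock);
		if (hdr == NULL)
			hdr = new;	/* we won the race */
	}
	mutex_exit(hash_lock);
	return (hdr);
}
#endif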
1149 | ||
1150 | static void | |
2a432414 | 1151 | buf_hash_remove(arc_buf_hdr_t *hdr) |
34dc7c2f | 1152 | { |
2a432414 GW |
1153 | arc_buf_hdr_t *fhdr, **hdrp; |
1154 | uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); | |
34dc7c2f BB |
1155 | |
1156 | ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); | |
2a432414 | 1157 | ASSERT(HDR_IN_HASH_TABLE(hdr)); |
34dc7c2f | 1158 | |
2a432414 GW |
1159 | hdrp = &buf_hash_table.ht_table[idx]; |
1160 | while ((fhdr = *hdrp) != hdr) { | |
d3c2ae1c | 1161 | ASSERT3P(fhdr, !=, NULL); |
2a432414 | 1162 | hdrp = &fhdr->b_hash_next; |
34dc7c2f | 1163 | } |
2a432414 GW |
1164 | *hdrp = hdr->b_hash_next; |
1165 | hdr->b_hash_next = NULL; | |
d3c2ae1c | 1166 | arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE); |
34dc7c2f BB |
1167 | |
1168 | /* collect some hash table performance data */ | |
1169 | ARCSTAT_BUMPDOWN(arcstat_hash_elements); | |
1170 | ||
1171 | if (buf_hash_table.ht_table[idx] && | |
1172 | buf_hash_table.ht_table[idx]->b_hash_next == NULL) | |
1173 | ARCSTAT_BUMPDOWN(arcstat_hash_chains); | |
1174 | } | |
1175 | ||
1176 | /* | |
1177 | * Global data structures and functions for the buf kmem cache. | |
1178 | */ | |
b5256303 | 1179 | |
b9541d6b | 1180 | static kmem_cache_t *hdr_full_cache; |
b5256303 | 1181 | static kmem_cache_t *hdr_full_crypt_cache; |
b9541d6b | 1182 | static kmem_cache_t *hdr_l2only_cache; |
34dc7c2f BB |
1183 | static kmem_cache_t *buf_cache; |
1184 | ||
1185 | static void | |
1186 | buf_fini(void) | |
1187 | { | |
1188 | int i; | |
1189 | ||
00b46022 | 1190 | #if defined(_KERNEL) && defined(HAVE_SPL) |
d1d7e268 MK |
1191 | /* |
1192 | * Large allocations which do not require contiguous pages | |
1193 | * should be using vmem_free() in the linux kernel
1194 | */ | |
00b46022 BB |
1195 | vmem_free(buf_hash_table.ht_table, |
1196 | (buf_hash_table.ht_mask + 1) * sizeof (void *)); | |
1197 | #else | |
34dc7c2f BB |
1198 | kmem_free(buf_hash_table.ht_table, |
1199 | (buf_hash_table.ht_mask + 1) * sizeof (void *)); | |
00b46022 | 1200 | #endif |
34dc7c2f BB |
1201 | for (i = 0; i < BUF_LOCKS; i++) |
1202 | mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); | |
b9541d6b | 1203 | kmem_cache_destroy(hdr_full_cache); |
b5256303 | 1204 | kmem_cache_destroy(hdr_full_crypt_cache); |
b9541d6b | 1205 | kmem_cache_destroy(hdr_l2only_cache); |
34dc7c2f BB |
1206 | kmem_cache_destroy(buf_cache); |
1207 | } | |
1208 | ||
1209 | /* | |
1210 | * Constructor callback - called when the cache is empty | |
1211 | * and a new buf is requested. | |
1212 | */ | |
1213 | /* ARGSUSED */ | |
1214 | static int | |
b9541d6b CW |
1215 | hdr_full_cons(void *vbuf, void *unused, int kmflag) |
1216 | { | |
1217 | arc_buf_hdr_t *hdr = vbuf; | |
1218 | ||
1219 | bzero(hdr, HDR_FULL_SIZE); | |
1220 | cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); | |
1221 | refcount_create(&hdr->b_l1hdr.b_refcnt); | |
1222 | mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); | |
1223 | list_link_init(&hdr->b_l1hdr.b_arc_node); | |
1224 | list_link_init(&hdr->b_l2hdr.b_l2node); | |
ca0bf58d | 1225 | multilist_link_init(&hdr->b_l1hdr.b_arc_node); |
b9541d6b CW |
1226 | arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); |
1227 | ||
1228 | return (0); | |
1229 | } | |
1230 | ||
b5256303 TC |
1231 | /* ARGSUSED */ |
1232 | static int | |
1233 | hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag) | |
1234 | { | |
1235 | arc_buf_hdr_t *hdr = vbuf; | |
1236 | ||
1237 | hdr_full_cons(vbuf, unused, kmflag); | |
1238 | bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr)); | |
1239 | arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); | |
1240 | ||
1241 | return (0); | |
1242 | } | |
1243 | ||
b9541d6b CW |
1244 | /* ARGSUSED */ |
1245 | static int | |
1246 | hdr_l2only_cons(void *vbuf, void *unused, int kmflag) | |
34dc7c2f | 1247 | { |
2a432414 GW |
1248 | arc_buf_hdr_t *hdr = vbuf; |
1249 | ||
b9541d6b CW |
1250 | bzero(hdr, HDR_L2ONLY_SIZE); |
1251 | arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); | |
34dc7c2f | 1252 | |
34dc7c2f BB |
1253 | return (0); |
1254 | } | |
1255 | ||
b128c09f BB |
1256 | /* ARGSUSED */ |
1257 | static int | |
1258 | buf_cons(void *vbuf, void *unused, int kmflag) | |
1259 | { | |
1260 | arc_buf_t *buf = vbuf; | |
1261 | ||
1262 | bzero(buf, sizeof (arc_buf_t)); | |
428870ff | 1263 | mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); |
d164b209 BB |
1264 | arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); |
1265 | ||
b128c09f BB |
1266 | return (0); |
1267 | } | |
1268 | ||
34dc7c2f BB |
1269 | /* |
1270 | * Destructor callback - called when a cached buf is | |
1271 | * no longer required. | |
1272 | */ | |
1273 | /* ARGSUSED */ | |
1274 | static void | |
b9541d6b | 1275 | hdr_full_dest(void *vbuf, void *unused) |
34dc7c2f | 1276 | { |
2a432414 | 1277 | arc_buf_hdr_t *hdr = vbuf; |
34dc7c2f | 1278 | |
d3c2ae1c | 1279 | ASSERT(HDR_EMPTY(hdr)); |
b9541d6b CW |
1280 | cv_destroy(&hdr->b_l1hdr.b_cv); |
1281 | refcount_destroy(&hdr->b_l1hdr.b_refcnt); | |
1282 | mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); | |
ca0bf58d | 1283 | ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); |
b9541d6b CW |
1284 | arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); |
1285 | } | |
1286 | ||
b5256303 TC |
1287 | /* ARGSUSED */ |
1288 | static void | |
1289 | hdr_full_crypt_dest(void *vbuf, void *unused) | |
1290 | { | |
1291 | arc_buf_hdr_t *hdr = vbuf; | |
1292 | ||
1293 | hdr_full_dest(vbuf, unused); | |
1294 | arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS); | |
1295 | } | |
1296 | ||
b9541d6b CW |
1297 | /* ARGSUSED */ |
1298 | static void | |
1299 | hdr_l2only_dest(void *vbuf, void *unused) | |
1300 | { | |
1301 | ASSERTV(arc_buf_hdr_t *hdr = vbuf); | |
1302 | ||
d3c2ae1c | 1303 | ASSERT(HDR_EMPTY(hdr)); |
b9541d6b | 1304 | arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); |
34dc7c2f BB |
1305 | } |
1306 | ||
b128c09f BB |
1307 | /* ARGSUSED */ |
1308 | static void | |
1309 | buf_dest(void *vbuf, void *unused) | |
1310 | { | |
1311 | arc_buf_t *buf = vbuf; | |
1312 | ||
428870ff | 1313 | mutex_destroy(&buf->b_evict_lock); |
d164b209 | 1314 | arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); |
b128c09f BB |
1315 | } |
1316 | ||
8c8af9d8 BB |
1317 | /* |
1318 | * Reclaim callback -- invoked when memory is low. | |
1319 | */ | |
1320 | /* ARGSUSED */ | |
1321 | static void | |
1322 | hdr_recl(void *unused) | |
1323 | { | |
1324 | dprintf("hdr_recl called\n"); | |
1325 | /* | |
1326 | * umem calls the reclaim func when we destroy the buf cache, | |
1327 | * which is after we do arc_fini(). | |
1328 | */ | |
1329 | if (!arc_dead) | |
1330 | cv_signal(&arc_reclaim_thread_cv); | |
1331 | } | |
1332 | ||
34dc7c2f BB |
1333 | static void |
1334 | buf_init(void) | |
1335 | { | |
2db28197 | 1336 | uint64_t *ct = NULL; |
34dc7c2f BB |
1337 | uint64_t hsize = 1ULL << 12; |
1338 | int i, j; | |
1339 | ||
1340 | /* | |
1341 | * The hash table is big enough to fill all of physical memory | |
49ddb315 MA |
1342 | * with an average block size of zfs_arc_average_blocksize (default 8K). |
1343 | * By default, the table will take up | |
1344 | * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). | |
34dc7c2f | 1345 | */ |
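	/*
	 * Worked example: with 4 GB of physical memory and the default
	 * 8K average block size, the loop below settles on hsize = 2^19
	 * slots (4 GB / 8K = 512K), i.e. 4 MB of 8-byte ht_table
	 * pointers, matching the 1 MB-per-GB figure above.
	 */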
9edb3695 | 1346 | while (hsize * zfs_arc_average_blocksize < arc_all_memory()) |
34dc7c2f BB |
1347 | hsize <<= 1; |
1348 | retry: | |
1349 | buf_hash_table.ht_mask = hsize - 1; | |
00b46022 | 1350 | #if defined(_KERNEL) && defined(HAVE_SPL) |
d1d7e268 MK |
1351 | /* |
1352 | * Large allocations which do not require contiguous pages | |
1353 | * should be using vmem_alloc() in the linux kernel | |
1354 | */ | |
00b46022 BB |
1355 | buf_hash_table.ht_table = |
1356 | vmem_zalloc(hsize * sizeof (void*), KM_SLEEP); | |
1357 | #else | |
34dc7c2f BB |
1358 | buf_hash_table.ht_table = |
1359 | kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); | |
00b46022 | 1360 | #endif |
34dc7c2f BB |
1361 | if (buf_hash_table.ht_table == NULL) { |
1362 | ASSERT(hsize > (1ULL << 8)); | |
1363 | hsize >>= 1; | |
1364 | goto retry; | |
1365 | } | |
1366 | ||
b9541d6b | 1367 | hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, |
8c8af9d8 | 1368 | 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); |
b5256303 TC |
1369 | hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt", |
1370 | HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest, | |
1371 | hdr_recl, NULL, NULL, 0); | |
b9541d6b | 1372 | hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", |
8c8af9d8 | 1373 | HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, |
b9541d6b | 1374 | NULL, NULL, 0); |
34dc7c2f | 1375 | buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), |
b128c09f | 1376 | 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); |
34dc7c2f BB |
1377 | |
1378 | for (i = 0; i < 256; i++) | |
1379 | for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) | |
1380 | *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); | |
1381 | ||
1382 | for (i = 0; i < BUF_LOCKS; i++) { | |
1383 | mutex_init(&buf_hash_table.ht_locks[i].ht_lock, | |
40d06e3c | 1384 | NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
1385 | } |
1386 | } | |
1387 | ||
d3c2ae1c | 1388 | #define ARC_MINTIME (hz>>4) /* 62 ms */ |
ca0bf58d | 1389 | |
2aa34383 DK |
1390 | /* |
1391 | * This is the size that the buf occupies in memory. If the buf is compressed, | |
1392 | * it will correspond to the compressed size. You should use this method of | |
1393 | * getting the buf size unless you explicitly need the logical size. | |
1394 | */ | |
1395 | uint64_t | |
1396 | arc_buf_size(arc_buf_t *buf) | |
1397 | { | |
1398 | return (ARC_BUF_COMPRESSED(buf) ? | |
1399 | HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr)); | |
1400 | } | |
1401 | ||
1402 | uint64_t | |
1403 | arc_buf_lsize(arc_buf_t *buf) | |
1404 | { | |
1405 | return (HDR_GET_LSIZE(buf->b_hdr)); | |
1406 | } | |
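/*
 * Worked example (hypothetical sizes): for a 128K logical block stored
 * compressed at 32K, a compressed buf reports arc_buf_size() == 32K
 * while arc_buf_lsize() == 128K; an uncompressed buf of the same block
 * returns 128K from both functions.
 */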
1407 | ||
b5256303 TC |
1408 | /* |
1409 | * This function will return B_TRUE if the buffer is encrypted in memory. | |
1410 | * This buffer can be decrypted by calling arc_untransform(). | |
1411 | */ | |
1412 | boolean_t | |
1413 | arc_is_encrypted(arc_buf_t *buf) | |
1414 | { | |
1415 | return (ARC_BUF_ENCRYPTED(buf) != 0); | |
1416 | } | |
1417 | ||
1418 | /* | |
1419 | * Returns B_TRUE if the buffer represents data that has not had its MAC | |
1420 | * verified yet. | |
1421 | */ | |
1422 | boolean_t | |
1423 | arc_is_unauthenticated(arc_buf_t *buf) | |
1424 | { | |
1425 | return (HDR_NOAUTH(buf->b_hdr) != 0); | |
1426 | } | |
1427 | ||
1428 | void | |
1429 | arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt, | |
1430 | uint8_t *iv, uint8_t *mac) | |
1431 | { | |
1432 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
1433 | ||
1434 | ASSERT(HDR_PROTECTED(hdr)); | |
1435 | ||
1436 | bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN); | |
1437 | bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN); | |
1438 | bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN); | |
1439 | *byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ? | |
1440 | ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER; | |
1441 | } | |
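/*
 * Usage sketch (illustrative only; the caller supplies storage for all
 * four output parameters of arc_get_raw_params()):
 */
#if 0
	boolean_t byteorder;
	uint8_t salt[ZIO_DATA_SALT_LEN];
	uint8_t iv[ZIO_DATA_IV_LEN];
	uint8_t mac[ZIO_DATA_MAC_LEN];

	arc_get_raw_params(buf, &byteorder, salt, iv, mac);
#endif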
1442 | ||
1443 | /* | |
1444 | * Indicates how this buffer is compressed in memory. If it is not compressed | |
1445 | * the value will be ZIO_COMPRESS_OFF. It can be made normally readable with | |
1446 | * arc_untransform() as long as it is also unencrypted. | |
1447 | */ | |
2aa34383 DK |
1448 | enum zio_compress |
1449 | arc_get_compression(arc_buf_t *buf) | |
1450 | { | |
1451 | return (ARC_BUF_COMPRESSED(buf) ? | |
1452 | HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF); | |
1453 | } | |
1454 | ||
b5256303 TC |
1455 | /* |
1456 | * Return the compression algorithm used to store this data in the ARC. If ARC | |
1457 | * compression is enabled or this is an encrypted block, this will be the same | |
1458 | * as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF. | |
1459 | */ | |
1460 | static inline enum zio_compress | |
1461 | arc_hdr_get_compress(arc_buf_hdr_t *hdr) | |
1462 | { | |
1463 | return (HDR_COMPRESSION_ENABLED(hdr) ? | |
1464 | HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF); | |
1465 | } | |
1466 | ||
d3c2ae1c GW |
1467 | static inline boolean_t |
1468 | arc_buf_is_shared(arc_buf_t *buf) | |
1469 | { | |
1470 | boolean_t shared = (buf->b_data != NULL && | |
a6255b7f DQ |
1471 | buf->b_hdr->b_l1hdr.b_pabd != NULL && |
1472 | abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) && | |
1473 | buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd)); | |
d3c2ae1c | 1474 | IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); |
2aa34383 DK |
1475 | IMPLY(shared, ARC_BUF_SHARED(buf)); |
1476 | IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf)); | |
524b4217 DK |
1477 | |
1478 | /* | |
1479 | * It would be nice to assert arc_can_share() too, but the "hdr isn't | |
1480 | * already being shared" requirement prevents us from doing that. | |
1481 | */ | |
1482 | ||
d3c2ae1c GW |
1483 | return (shared); |
1484 | } | |
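/*
 * Restated from the checks above (a reading of this function, not new
 * policy): a buf is "shared" only when its b_data literally aliases
 * the hdr's linear b_pabd, so the two must never be freed
 * independently, and a scattered (non-linear) abd can never back a
 * shared buf.
 */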
ca0bf58d | 1485 | |
a7004725 DK |
1486 | /* |
1487 | * Free the checksum associated with this header. If there is no checksum, this | |
1488 | * is a no-op. | |
1489 | */ | |
d3c2ae1c GW |
1490 | static inline void |
1491 | arc_cksum_free(arc_buf_hdr_t *hdr) | |
1492 | { | |
1493 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
b5256303 | 1494 | |
d3c2ae1c GW |
1495 | mutex_enter(&hdr->b_l1hdr.b_freeze_lock); |
1496 | if (hdr->b_l1hdr.b_freeze_cksum != NULL) { | |
1497 | kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); | |
1498 | hdr->b_l1hdr.b_freeze_cksum = NULL; | |
b9541d6b | 1499 | } |
d3c2ae1c | 1500 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); |
b9541d6b CW |
1501 | } |
1502 | ||
a7004725 DK |
1503 | /* |
1504 | * Return true iff at least one of the bufs on hdr is not compressed. | |
b5256303 | 1505 | * Encrypted buffers count as compressed. |
a7004725 DK |
1506 | */ |
1507 | static boolean_t | |
1508 | arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr) | |
1509 | { | |
1510 | for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) { | |
1511 | if (!ARC_BUF_COMPRESSED(b)) { | |
1512 | return (B_TRUE); | |
1513 | } | |
1514 | } | |
1515 | return (B_FALSE); | |
1516 | } | |
1517 | ||
1518 | ||
524b4217 DK |
1519 | /* |
1520 | * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data | |
1521 | * matches the checksum that is stored in the hdr. If there is no checksum, | |
1522 | * or if the buf is compressed, this is a no-op. | |
1523 | */ | |
34dc7c2f BB |
1524 | static void |
1525 | arc_cksum_verify(arc_buf_t *buf) | |
1526 | { | |
d3c2ae1c | 1527 | arc_buf_hdr_t *hdr = buf->b_hdr; |
34dc7c2f BB |
1528 | zio_cksum_t zc; |
1529 | ||
1530 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) | |
1531 | return; | |
1532 | ||
524b4217 | 1533 | if (ARC_BUF_COMPRESSED(buf)) { |
a7004725 DK |
1534 | ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || |
1535 | arc_hdr_has_uncompressed_buf(hdr)); | |
524b4217 DK |
1536 | return; |
1537 | } | |
1538 | ||
d3c2ae1c GW |
1539 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1540 | ||
1541 | mutex_enter(&hdr->b_l1hdr.b_freeze_lock); | |
1542 | if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { | |
1543 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); | |
34dc7c2f BB |
1544 | return; |
1545 | } | |
2aa34383 | 1546 | |
3c67d83a | 1547 | fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc); |
d3c2ae1c | 1548 | if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) |
34dc7c2f | 1549 | panic("buffer modified while frozen!"); |
d3c2ae1c | 1550 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1551 | } |
1552 | ||
b5256303 TC |
1553 | /* |
1554 | * This function makes the assumption that data stored in the L2ARC | |
1555 | * will be transformed exactly as it is in the main pool. Because of | |
1556 | * this we can verify the checksum against the reading process's bp. | |
1557 | */ | |
d3c2ae1c GW |
1558 | static boolean_t |
1559 | arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) | |
34dc7c2f | 1560 | { |
d3c2ae1c GW |
1561 | ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); |
1562 | VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); | |
34dc7c2f | 1563 | |
d3c2ae1c GW |
1564 | /* |
1565 | * Block pointers always store the checksum for the logical data. | |
1566 | * If the block pointer has the gang bit set, then the checksum | |
1567 | * it represents is for the reconstituted data and not for an | |
1568 | * individual gang member. The zio pipeline, however, must be able to | |
1569 | * determine the checksum of each of the gang constituents so it | |
1570 | * treats the checksum comparison differently than what we need | |
1571 | * for l2arc blocks. This prevents us from using the | |
1572 | * zio_checksum_error() interface directly. Instead we must call the | |
1573 | * zio_checksum_error_impl() so that we can ensure the checksum is | |
1574 | * generated using the correct checksum algorithm and accounts for the | |
1575 | * logical I/O size and not just a gang fragment. | |
1576 | */ | |
b5256303 | 1577 | return (zio_checksum_error_impl(zio->io_spa, zio->io_bp, |
a6255b7f | 1578 | BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size, |
d3c2ae1c | 1579 | zio->io_offset, NULL) == 0); |
34dc7c2f BB |
1580 | } |
1581 | ||
524b4217 DK |
1582 | /* |
1583 | * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a | |
1584 | * checksum and attaches it to the buf's hdr so that we can ensure that the buf | |
1585 | * isn't modified later on. If buf is compressed or there is already a checksum | |
1586 | * on the hdr, this is a no-op (we only checksum uncompressed bufs). | |
1587 | */ | |
34dc7c2f | 1588 | static void |
d3c2ae1c | 1589 | arc_cksum_compute(arc_buf_t *buf) |
34dc7c2f | 1590 | { |
d3c2ae1c GW |
1591 | arc_buf_hdr_t *hdr = buf->b_hdr; |
1592 | ||
1593 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) | |
34dc7c2f BB |
1594 | return; |
1595 | ||
d3c2ae1c | 1596 | ASSERT(HDR_HAS_L1HDR(hdr)); |
2aa34383 | 1597 | |
b9541d6b | 1598 | mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
d3c2ae1c | 1599 | if (hdr->b_l1hdr.b_freeze_cksum != NULL) { |
a7004725 | 1600 | ASSERT(arc_hdr_has_uncompressed_buf(hdr)); |
2aa34383 DK |
1601 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); |
1602 | return; | |
1603 | } else if (ARC_BUF_COMPRESSED(buf)) { | |
d3c2ae1c | 1604 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1605 | return; |
1606 | } | |
2aa34383 | 1607 | |
b5256303 | 1608 | ASSERT(!ARC_BUF_ENCRYPTED(buf)); |
2aa34383 | 1609 | ASSERT(!ARC_BUF_COMPRESSED(buf)); |
d3c2ae1c GW |
1610 | hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), |
1611 | KM_SLEEP); | |
3c67d83a | 1612 | fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, |
d3c2ae1c GW |
1613 | hdr->b_l1hdr.b_freeze_cksum); |
1614 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); | |
498877ba MA |
1615 | arc_buf_watch(buf); |
1616 | } | |
1617 | ||
1618 | #ifndef _KERNEL | |
1619 | void | |
1620 | arc_buf_sigsegv(int sig, siginfo_t *si, void *unused) | |
1621 | { | |
02730c33 | 1622 | panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr); |
498877ba MA |
1623 | } |
1624 | #endif | |
1625 | ||
1626 | /* ARGSUSED */ | |
1627 | static void | |
1628 | arc_buf_unwatch(arc_buf_t *buf) | |
1629 | { | |
1630 | #ifndef _KERNEL | |
1631 | if (arc_watch) { | |
a7004725 | 1632 | ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), |
498877ba MA |
1633 | PROT_READ | PROT_WRITE)); |
1634 | } | |
1635 | #endif | |
1636 | } | |
1637 | ||
1638 | /* ARGSUSED */ | |
1639 | static void | |
1640 | arc_buf_watch(arc_buf_t *buf) | |
1641 | { | |
1642 | #ifndef _KERNEL | |
1643 | if (arc_watch) | |
2aa34383 | 1644 | ASSERT0(mprotect(buf->b_data, arc_buf_size(buf), |
d3c2ae1c | 1645 | PROT_READ)); |
498877ba | 1646 | #endif |
34dc7c2f BB |
1647 | } |
1648 | ||
b9541d6b CW |
1649 | static arc_buf_contents_t |
1650 | arc_buf_type(arc_buf_hdr_t *hdr) | |
1651 | { | |
d3c2ae1c | 1652 | arc_buf_contents_t type; |
b9541d6b | 1653 | if (HDR_ISTYPE_METADATA(hdr)) { |
d3c2ae1c | 1654 | type = ARC_BUFC_METADATA; |
b9541d6b | 1655 | } else { |
d3c2ae1c | 1656 | type = ARC_BUFC_DATA; |
b9541d6b | 1657 | } |
d3c2ae1c GW |
1658 | VERIFY3U(hdr->b_type, ==, type); |
1659 | return (type); | |
b9541d6b CW |
1660 | } |
1661 | ||
2aa34383 DK |
1662 | boolean_t |
1663 | arc_is_metadata(arc_buf_t *buf) | |
1664 | { | |
1665 | return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0); | |
1666 | } | |
1667 | ||
b9541d6b CW |
1668 | static uint32_t |
1669 | arc_bufc_to_flags(arc_buf_contents_t type) | |
1670 | { | |
1671 | switch (type) { | |
1672 | case ARC_BUFC_DATA: | |
1673 | /* metadata field is 0 if buffer contains normal data */ | |
1674 | return (0); | |
1675 | case ARC_BUFC_METADATA: | |
1676 | return (ARC_FLAG_BUFC_METADATA); | |
1677 | default: | |
1678 | break; | |
1679 | } | |
1680 | panic("undefined ARC buffer type!"); | |
1681 | return ((uint32_t)-1); | |
1682 | } | |
1683 | ||
34dc7c2f BB |
1684 | void |
1685 | arc_buf_thaw(arc_buf_t *buf) | |
1686 | { | |
d3c2ae1c GW |
1687 | arc_buf_hdr_t *hdr = buf->b_hdr; |
1688 | ||
2aa34383 DK |
1689 | ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); |
1690 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
1691 | ||
524b4217 | 1692 | arc_cksum_verify(buf); |
34dc7c2f | 1693 | |
2aa34383 DK |
1694 | /* |
1695 | * Compressed buffers do not manipulate the b_freeze_cksum or | |
1696 | * allocate b_thawed. | |
1697 | */ | |
1698 | if (ARC_BUF_COMPRESSED(buf)) { | |
a7004725 DK |
1699 | ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || |
1700 | arc_hdr_has_uncompressed_buf(hdr)); | |
2aa34383 DK |
1701 | return; |
1702 | } | |
1703 | ||
d3c2ae1c GW |
1704 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1705 | arc_cksum_free(hdr); | |
498877ba | 1706 | arc_buf_unwatch(buf); |
34dc7c2f BB |
1707 | } |
1708 | ||
1709 | void | |
1710 | arc_buf_freeze(arc_buf_t *buf) | |
1711 | { | |
d3c2ae1c | 1712 | arc_buf_hdr_t *hdr = buf->b_hdr; |
428870ff BB |
1713 | kmutex_t *hash_lock; |
1714 | ||
34dc7c2f BB |
1715 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) |
1716 | return; | |
1717 | ||
2aa34383 | 1718 | if (ARC_BUF_COMPRESSED(buf)) { |
a7004725 DK |
1719 | ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || |
1720 | arc_hdr_has_uncompressed_buf(hdr)); | |
2aa34383 DK |
1721 | return; |
1722 | } | |
1723 | ||
d3c2ae1c | 1724 | hash_lock = HDR_LOCK(hdr); |
428870ff BB |
1725 | mutex_enter(hash_lock); |
1726 | ||
d3c2ae1c GW |
1727 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1728 | ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL || | |
1729 | hdr->b_l1hdr.b_state == arc_anon); | |
1730 | arc_cksum_compute(buf); | |
428870ff | 1731 | mutex_exit(hash_lock); |
34dc7c2f BB |
1732 | } |
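/*
 * Usage sketch (illustrative): a consumer that legitimately rewrites
 * b_data brackets the modification with the two calls above so the
 * ZFS_DEBUG_MODIFY checksum is dropped and then recomputed
 * (example_modify() is hypothetical):
 */
#if 0
	arc_buf_thaw(buf);		/* discard the frozen checksum */
	example_modify(buf->b_data);	/* in-place update of the data */
	arc_buf_freeze(buf);		/* re-checksum the new contents */
#endif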
1733 | ||
d3c2ae1c GW |
1734 | /* |
1735 | * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, | |
1736 | * the following functions should be used to ensure that the flags are | |
1737 | * updated in a thread-safe way. When manipulating the flags either | |
1738 | * the hash_lock must be held or the hdr must be undiscoverable. This | |
1739 | * ensures that we're not racing with any other threads when updating | |
1740 | * the flags. | |
1741 | */ | |
1742 | static inline void | |
1743 | arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) | |
1744 | { | |
1745 | ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); | |
1746 | hdr->b_flags |= flags; | |
1747 | } | |
1748 | ||
1749 | static inline void | |
1750 | arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) | |
1751 | { | |
1752 | ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); | |
1753 | hdr->b_flags &= ~flags; | |
1754 | } | |
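/*
 * Example (restating the locking rule above, not a new code path):
 * both helpers assume the caller has already serialized access, e.g.
 * when clearing the prefetch bit on a hashed header:
 *
 *	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
 *	arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
 */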
1755 | ||
1756 | /* | |
1757 | * Setting the compression bits in the arc_buf_hdr_t's b_flags is | |
1758 | * done in a special way since we have to clear and set bits | |
1759 | * at the same time. Consumers that wish to set the compression bits | |
1760 | * must use this function to ensure that the flags are updated in | |
1761 | * a thread-safe manner.
1762 | */ | |
1763 | static void | |
1764 | arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) | |
1765 | { | |
1766 | ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); | |
1767 | ||
1768 | /* | |
1769 | * Holes and embedded blocks will always have a psize = 0, so
1770 | * we ignore the compression of the blkptr and never want to
d3c2ae1c GW |
1771 | * compress them in the ARC. Mark them as uncompressed.
1772 | */ | |
1773 | if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { | |
1774 | arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); | |
d3c2ae1c | 1775 | ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); |
d3c2ae1c GW |
1776 | } else { |
1777 | arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); | |
d3c2ae1c GW |
1778 | ASSERT(HDR_COMPRESSION_ENABLED(hdr)); |
1779 | } | |
b5256303 TC |
1780 | |
1781 | HDR_SET_COMPRESS(hdr, cmp); | |
1782 | ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); | |
d3c2ae1c GW |
1783 | } |
1784 | ||
524b4217 DK |
1785 | /* |
1786 | * Looks for another buf on the same hdr which has the data decompressed, copies | |
1787 | * from it, and returns true. If no such buf exists, returns false. | |
1788 | */ | |
1789 | static boolean_t | |
1790 | arc_buf_try_copy_decompressed_data(arc_buf_t *buf) | |
1791 | { | |
1792 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
524b4217 DK |
1793 | boolean_t copied = B_FALSE; |
1794 | ||
1795 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
1796 | ASSERT3P(buf->b_data, !=, NULL); | |
1797 | ASSERT(!ARC_BUF_COMPRESSED(buf)); | |
1798 | ||
a7004725 | 1799 | for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL; |
524b4217 DK |
1800 | from = from->b_next) { |
1801 | /* can't use our own data buffer */ | |
1802 | if (from == buf) { | |
1803 | continue; | |
1804 | } | |
1805 | ||
1806 | if (!ARC_BUF_COMPRESSED(from)) { | |
1807 | bcopy(from->b_data, buf->b_data, arc_buf_size(buf)); | |
1808 | copied = B_TRUE; | |
1809 | break; | |
1810 | } | |
1811 | } | |
1812 | ||
1813 | /* | |
1814 | * There were no decompressed bufs, so there should not be a | |
1815 | * checksum on the hdr either. | |
1816 | */ | |
1817 | EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL); | |
1818 | ||
1819 | return (copied); | |
1820 | } | |
1821 | ||
b5256303 TC |
1822 | /* |
1823 | * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t. | |
1824 | */ | |
1825 | static uint64_t | |
1826 | arc_hdr_size(arc_buf_hdr_t *hdr) | |
1827 | { | |
1828 | uint64_t size; | |
1829 | ||
1830 | if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF && | |
1831 | HDR_GET_PSIZE(hdr) > 0) { | |
1832 | size = HDR_GET_PSIZE(hdr); | |
1833 | } else { | |
1834 | ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); | |
1835 | size = HDR_GET_LSIZE(hdr); | |
1836 | } | |
1837 | return (size); | |
1838 | } | |
1839 | ||
1840 | static int | |
1841 | arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj) | |
1842 | { | |
1843 | int ret; | |
1844 | uint64_t csize; | |
1845 | uint64_t lsize = HDR_GET_LSIZE(hdr); | |
1846 | uint64_t psize = HDR_GET_PSIZE(hdr); | |
1847 | void *tmpbuf = NULL; | |
1848 | abd_t *abd = hdr->b_l1hdr.b_pabd; | |
1849 | ||
1850 | ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); | |
1851 | ASSERT(HDR_AUTHENTICATED(hdr)); | |
1852 | ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); | |
1853 | ||
1854 | /* | |
1855 | * The MAC is calculated on the compressed data that is stored on disk. | |
1856 | * However, if compressed arc is disabled we will only have the | |
1857 | * decompressed data available to us now. Compress it into a temporary | |
1858 | * abd so we can verify the MAC. The performance overhead of this will | |
1859 | * be relatively low, since most objects in an encrypted objset will | |
1860 | * be encrypted (instead of authenticated) anyway. | |
1861 | */ | |
1862 | if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && | |
1863 | !HDR_COMPRESSION_ENABLED(hdr)) { | |
1864 | tmpbuf = zio_buf_alloc(lsize); | |
1865 | abd = abd_get_from_buf(tmpbuf, lsize); | |
1866 | abd_take_ownership_of_buf(abd, B_TRUE); | |
1867 | ||
1868 | csize = zio_compress_data(HDR_GET_COMPRESS(hdr), | |
1869 | hdr->b_l1hdr.b_pabd, tmpbuf, lsize); | |
1870 | ASSERT3U(csize, <=, psize); | |
1871 | abd_zero_off(abd, csize, psize - csize); | |
1872 | } | |
1873 | ||
1874 | /* | |
1875 | * Authentication is best effort. We authenticate whenever the key is | |
1876 | * available. If we succeed we clear ARC_FLAG_NOAUTH. | |
1877 | */ | |
1878 | if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) { | |
1879 | ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); | |
1880 | ASSERT3U(lsize, ==, psize); | |
1881 | ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd, | |
1882 | psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); | |
1883 | } else { | |
1884 | ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize, | |
1885 | hdr->b_crypt_hdr.b_mac); | |
1886 | } | |
1887 | ||
1888 | if (ret == 0) | |
1889 | arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH); | |
1890 | else if (ret != ENOENT) | |
1891 | goto error; | |
1892 | ||
1893 | if (tmpbuf != NULL) | |
1894 | abd_free(abd); | |
1895 | ||
1896 | return (0); | |
1897 | ||
1898 | error: | |
1899 | if (tmpbuf != NULL) | |
1900 | abd_free(abd); | |
1901 | ||
1902 | return (ret); | |
1903 | } | |
1904 | ||
1905 | /* | |
1906 | * This function will take a header that only has raw encrypted data in | |
1907 | * b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in | |
1908 | * b_l1hdr.b_pabd. If designated in the header flags, this function will | |
1909 | * also decompress the data. | |
1910 | */ | |
1911 | static int | |
1912 | arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj) | |
1913 | { | |
1914 | int ret; | |
1915 | dsl_crypto_key_t *dck = NULL; | |
1916 | abd_t *cabd = NULL; | |
1917 | void *tmp = NULL; | |
1918 | boolean_t no_crypt = B_FALSE; | |
1919 | boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); | |
1920 | ||
1921 | ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); | |
1922 | ASSERT(HDR_ENCRYPTED(hdr)); | |
1923 | ||
1924 | arc_hdr_alloc_abd(hdr, B_FALSE); | |
1925 | ||
1926 | /* | |
1927 | * We must be careful to use the passed-in dsobj value here and | |
1928 | * not the value in b_dsobj. b_dsobj is meant to be a best guess for | |
1929 | * the L2ARC, which has the luxury of being able to fail without real | |
1930 | * consequences (the data simply won't make it to the L2ARC). In | |
1931 | * reality, the dsobj stored in the header may belong to a dataset | |
1932 | * that has been unmounted or otherwise disowned, meaning the key | |
1933 | * won't be accessible via that dsobj anymore. | |
1934 | */ | |
1935 | ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck); | |
1936 | if (ret != 0) { | |
1937 | ret = SET_ERROR(EACCES); | |
1938 | goto error; | |
1939 | } | |
1940 | ||
1941 | ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key, | |
1942 | hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot, | |
1943 | hdr->b_crypt_hdr.b_iv, hdr->b_crypt_hdr.b_mac, | |
1944 | HDR_GET_PSIZE(hdr), bswap, hdr->b_l1hdr.b_pabd, | |
1945 | hdr->b_crypt_hdr.b_rabd, &no_crypt); | |
1946 | if (ret != 0) | |
1947 | goto error; | |
1948 | ||
1949 | if (no_crypt) { | |
1950 | abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd, | |
1951 | HDR_GET_PSIZE(hdr)); | |
1952 | } | |
1953 | ||
1954 | /* | |
1955 | * If this header has disabled arc compression but the b_pabd is | |
1956 | * compressed after decrypting it, we need to decompress the newly | |
1957 | * decrypted data. | |
1958 | */ | |
1959 | if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && | |
1960 | !HDR_COMPRESSION_ENABLED(hdr)) { | |
1961 | /* | |
1962 | * We want to make sure that we are correctly honoring the | |
1963 | * zfs_abd_scatter_enabled setting, so we allocate an abd here | |
1964 | * and then loan a buffer from it, rather than allocating a | |
1965 | * linear buffer and wrapping it in an abd later. | |
1966 | */ | |
1967 | cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); | |
1968 | tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); | |
1969 | ||
1970 | ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), | |
1971 | hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), | |
1972 | HDR_GET_LSIZE(hdr)); | |
1973 | if (ret != 0) { | |
1974 | abd_return_buf(cabd, tmp, arc_hdr_size(hdr)); | |
1975 | goto error; | |
1976 | } | |
1977 | ||
1978 | abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); | |
1979 | arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, | |
1980 | arc_hdr_size(hdr), hdr); | |
1981 | hdr->b_l1hdr.b_pabd = cabd; | |
1982 | } | |
1983 | ||
1984 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
1985 | ||
1986 | return (0); | |
1987 | ||
1988 | error: | |
1989 | arc_hdr_free_abd(hdr, B_FALSE); | |
1990 | if (dck != NULL) | |
1991 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
1992 | if (cabd != NULL) | |
1993 | arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr); | |
1994 | ||
1995 | return (ret); | |
1996 | } | |
1997 | ||
1998 | /* | |
1999 | * This function is called during arc_buf_fill() to prepare the header's | |
2000 | * abd plaintext pointer for use. This involves authenticating protected
2001 | * data and decrypting encrypted data into the plaintext abd. | |
2002 | */ | |
2003 | static int | |
2004 | arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa, | |
2005 | uint64_t dsobj, boolean_t noauth) | |
2006 | { | |
2007 | int ret; | |
2008 | ||
2009 | ASSERT(HDR_PROTECTED(hdr)); | |
2010 | ||
2011 | if (hash_lock != NULL) | |
2012 | mutex_enter(hash_lock); | |
2013 | ||
2014 | if (HDR_NOAUTH(hdr) && !noauth) { | |
2015 | /* | |
2016 | * The caller requested authenticated data but our data has | |
2017 | * not been authenticated yet. Verify the MAC now if we can. | |
2018 | */ | |
2019 | ret = arc_hdr_authenticate(hdr, spa, dsobj); | |
2020 | if (ret != 0) | |
2021 | goto error; | |
2022 | } else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) { | |
2023 | /* | |
2024 | * If we only have the encrypted version of the data, but the | |
2025 | * unencrypted version was requested we take this opportunity | |
2026 | * to store the decrypted version in the header for future use. | |
2027 | */ | |
2028 | ret = arc_hdr_decrypt(hdr, spa, dsobj); | |
2029 | if (ret != 0) | |
2030 | goto error; | |
2031 | } | |
2032 | ||
2033 | ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); | |
2034 | ||
2035 | if (hash_lock != NULL) | |
2036 | mutex_exit(hash_lock); | |
2037 | ||
2038 | return (0); | |
2039 | ||
2040 | error: | |
2041 | if (hash_lock != NULL) | |
2042 | mutex_exit(hash_lock); | |
2043 | ||
2044 | return (ret); | |
2045 | } | |
2046 | ||
2047 | /* | |
2048 | * This function is used by the dbuf code to decrypt bonus buffers in place. | |
2049 | * The dbuf code itself doesn't have any locking for decrypting a shared dnode | |
2050 | * block, so we use the hash lock here to protect against concurrent calls to | |
2051 | * arc_buf_fill(). | |
2052 | */ | |
2053 | static void | |
2054 | arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock) | |
2055 | { | |
2056 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2057 | ||
2058 | ASSERT(HDR_ENCRYPTED(hdr)); | |
2059 | ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); | |
2060 | ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr))); | |
2061 | ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); | |
2062 | ||
2063 | zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data, | |
2064 | arc_buf_size(buf)); | |
2065 | buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED; | |
2066 | buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; | |
2067 | hdr->b_crypt_hdr.b_ebufcnt -= 1; | |
2068 | } | |
2069 | ||
524b4217 DK |
2070 | /* |
2071 | * Given a buf that has a data buffer attached to it, this function will | |
2072 | * efficiently fill the buf with data of the specified compression setting from | |
2073 | * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr | |
2074 | * are already sharing a data buf, no copy is performed. | |
2075 | * | |
2076 | * If the buf is marked as compressed but uncompressed data was requested, this | |
2077 | * will allocate a new data buffer for the buf, remove that flag, and fill the | |
2078 | * buf with uncompressed data. You can't request a compressed buf on a hdr with | |
2079 | * uncompressed data, and (since we haven't added support for it yet) if you | |
2080 | * want compressed data your buf must already be marked as compressed and have | |
2081 | * the correct-sized data buffer. | |
2082 | */ | |
2083 | static int | |
b5256303 | 2084 | arc_buf_fill(arc_buf_t *buf, spa_t *spa, uint64_t dsobj, arc_fill_flags_t flags) |
d3c2ae1c | 2085 | { |
b5256303 | 2086 | int error = 0; |
d3c2ae1c | 2087 | arc_buf_hdr_t *hdr = buf->b_hdr; |
b5256303 TC |
2088 | boolean_t hdr_compressed = |
2089 | (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); | |
2090 | boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0; | |
2091 | boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0; | |
d3c2ae1c | 2092 | dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; |
b5256303 | 2093 | kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr); |
d3c2ae1c | 2094 | |
524b4217 | 2095 | ASSERT3P(buf->b_data, !=, NULL); |
b5256303 | 2096 | IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf)); |
524b4217 | 2097 | IMPLY(compressed, ARC_BUF_COMPRESSED(buf)); |
b5256303 TC |
2098 | IMPLY(encrypted, HDR_ENCRYPTED(hdr)); |
2099 | IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf)); | |
2100 | IMPLY(encrypted, ARC_BUF_COMPRESSED(buf)); | |
2101 | IMPLY(encrypted, !ARC_BUF_SHARED(buf)); | |
2102 | ||
2103 | /* | |
2104 | * If the caller wanted encrypted data we just need to copy it from | |
2105 | * b_rabd and potentially byteswap it. We won't be able to do any | |
2106 | * further transforms on it. | |
2107 | */ | |
2108 | if (encrypted) { | |
2109 | ASSERT(HDR_HAS_RABD(hdr)); | |
2110 | abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd, | |
2111 | HDR_GET_PSIZE(hdr)); | |
2112 | goto byteswap; | |
2113 | } | |
2114 | ||
2115 | /* | |
2116 | * Adjust encrypted and authenticated headers to accommodate the
2117 | * request if needed. | |
2118 | */ | |
2119 | if (HDR_PROTECTED(hdr)) { | |
2120 | error = arc_fill_hdr_crypt(hdr, hash_lock, spa, | |
2121 | dsobj, !!(flags & ARC_FILL_NOAUTH)); | |
2122 | if (error != 0) | |
2123 | return (error); | |
2124 | } | |
2125 | ||
2126 | /* | |
2127 | * There is a special case here for dnode blocks which are | |
2128 | * decrypting their bonus buffers. These blocks may request to | |
2129 | * be decrypted in-place. This is necessary because there may | |
2130 | * be many dnodes pointing into this buffer and there is | |
2131 | * currently no method to synchronize replacing the backing | |
2132 | * b_data buffer and updating all of the pointers. Here we use | |
2133 | * the hash lock to ensure there are no races. If the need | |
2134 | * arises for other types to be decrypted in-place, they must | |
2135 | * add handling here as well. | |
2136 | */ | |
2137 | if ((flags & ARC_FILL_IN_PLACE) != 0) { | |
2138 | ASSERT(!hdr_compressed); | |
2139 | ASSERT(!compressed); | |
2140 | ASSERT(!encrypted); | |
2141 | ||
2142 | if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) { | |
2143 | ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE); | |
2144 | ||
2145 | if (hash_lock != NULL) | |
2146 | mutex_enter(hash_lock); | |
2147 | arc_buf_untransform_in_place(buf, hash_lock); | |
2148 | if (hash_lock != NULL) | |
2149 | mutex_exit(hash_lock); | |
2150 | ||
2151 | /* Compute the hdr's checksum if necessary */ | |
2152 | arc_cksum_compute(buf); | |
2153 | } | |
2154 | ||
2155 | return (0); | |
2156 | } | |
524b4217 DK |
2157 | |
2158 | if (hdr_compressed == compressed) { | |
2aa34383 | 2159 | if (!arc_buf_is_shared(buf)) { |
a6255b7f | 2160 | abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd, |
524b4217 | 2161 | arc_buf_size(buf)); |
2aa34383 | 2162 | } |
d3c2ae1c | 2163 | } else { |
524b4217 DK |
2164 | ASSERT(hdr_compressed); |
2165 | ASSERT(!compressed); | |
d3c2ae1c | 2166 | ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr)); |
2aa34383 DK |
2167 | |
2168 | /* | |
524b4217 DK |
2169 | * If the buf is sharing its data with the hdr, unlink it and |
2170 | * allocate a new data buffer for the buf. | |
2aa34383 | 2171 | */ |
524b4217 DK |
2172 | if (arc_buf_is_shared(buf)) { |
2173 | ASSERT(ARC_BUF_COMPRESSED(buf)); | |
2174 | ||
2175 | /* We need to give the buf its own b_data */
2176 | buf->b_flags &= ~ARC_BUF_FLAG_SHARED; | |
2aa34383 DK |
2177 | buf->b_data = |
2178 | arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); | |
2179 | arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); | |
2180 | ||
524b4217 | 2181 | /* Previously overhead was 0; just add new overhead */ |
2aa34383 | 2182 | ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); |
524b4217 DK |
2183 | } else if (ARC_BUF_COMPRESSED(buf)) { |
2184 | /* We need to reallocate the buf's b_data */ | |
2185 | arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr), | |
2186 | buf); | |
2187 | buf->b_data = | |
2188 | arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); | |
2189 | ||
2190 | /* We increased the size of b_data; update overhead */ | |
2191 | ARCSTAT_INCR(arcstat_overhead_size, | |
2192 | HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr)); | |
2aa34383 DK |
2193 | } |
2194 | ||
524b4217 DK |
2195 | /* |
2196 | * Regardless of the buf's previous compression settings, it | |
2197 | * should not be compressed at the end of this function. | |
2198 | */ | |
2199 | buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; | |
2200 | ||
2201 | /* | |
2202 | * Try copying the data from another buf which already has a | |
2203 | * decompressed version. If that's not possible, it's time to | |
2204 | * bite the bullet and decompress the data from the hdr. | |
2205 | */ | |
2206 | if (arc_buf_try_copy_decompressed_data(buf)) { | |
2207 | /* Skip byteswapping and checksumming (already done) */ | |
2208 | ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL); | |
2209 | return (0); | |
2210 | } else { | |
b5256303 | 2211 | error = zio_decompress_data(HDR_GET_COMPRESS(hdr), |
a6255b7f | 2212 | hdr->b_l1hdr.b_pabd, buf->b_data, |
524b4217 DK |
2213 | HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); |
2214 | ||
2215 | /* | |
2216 | * Absent hardware errors or software bugs, this should | |
2217 | * be impossible, but log it anyway so we can debug it. | |
2218 | */ | |
2219 | if (error != 0) { | |
2220 | zfs_dbgmsg( | |
2221 | "hdr %p, compress %d, psize %d, lsize %d", | |
b5256303 | 2222 | hdr, arc_hdr_get_compress(hdr), |
524b4217 DK |
2223 | HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); |
2224 | return (SET_ERROR(EIO)); | |
2225 | } | |
d3c2ae1c GW |
2226 | } |
2227 | } | |
524b4217 | 2228 | |
b5256303 | 2229 | byteswap: |
524b4217 | 2230 | /* Byteswap the buf's data if necessary */ |
d3c2ae1c GW |
2231 | if (bswap != DMU_BSWAP_NUMFUNCS) { |
2232 | ASSERT(!HDR_SHARED_DATA(hdr)); | |
2233 | ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); | |
2234 | dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); | |
2235 | } | |
524b4217 DK |
2236 | |
2237 | /* Compute the hdr's checksum if necessary */ | |
d3c2ae1c | 2238 | arc_cksum_compute(buf); |
524b4217 | 2239 | |
d3c2ae1c GW |
2240 | return (0); |
2241 | } | |
2242 | ||
2243 | /* | |
b5256303 TC |
2244 | * If this function is being called to decrypt an encrypted buffer or verify an |
2245 | * authenticated one, the key must be loaded and a mapping must be made | |
2246 | * available in the keystore via spa_keystore_create_mapping() or one of its | |
2247 | * callers. | |
d3c2ae1c | 2248 | */ |
b5256303 TC |
2249 | int |
2250 | arc_untransform(arc_buf_t *buf, spa_t *spa, uint64_t dsobj, boolean_t in_place) | |
d3c2ae1c | 2251 | { |
b5256303 | 2252 | arc_fill_flags_t flags = 0; |
d3c2ae1c | 2253 | |
b5256303 TC |
2254 | if (in_place) |
2255 | flags |= ARC_FILL_IN_PLACE; | |
2256 | ||
2257 | return (arc_buf_fill(buf, spa, dsobj, flags)); | |
d3c2ae1c GW |
2258 | } |
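/*
 * Usage sketch (illustrative): a caller holding the dataset's key can
 * decrypt a raw buf it received from the ARC; EACCES is what the
 * decrypt path above reports when the key lookup fails:
 */
#if 0
	int err = arc_untransform(buf, spa, dsobj, B_FALSE);
	if (err != 0)
		return (err);	/* e.g. EACCES when the key isn't loaded */
#endif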
2259 | ||
2260 | /* | |
2261 | * Increment the amount of evictable space in the arc_state_t's refcount. | |
2262 | * We account for the space used by the hdr and the arc buf individually | |
2263 | * so that we can add and remove them from the refcount individually. | |
2264 | */ | |
34dc7c2f | 2265 | static void |
d3c2ae1c GW |
2266 | arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) |
2267 | { | |
2268 | arc_buf_contents_t type = arc_buf_type(hdr); | |
d3c2ae1c GW |
2269 | arc_buf_t *buf; |
2270 | ||
2271 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
2272 | ||
2273 | if (GHOST_STATE(state)) { | |
2274 | ASSERT0(hdr->b_l1hdr.b_bufcnt); | |
2275 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); | |
a6255b7f | 2276 | ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); |
b5256303 | 2277 | ASSERT(!HDR_HAS_RABD(hdr)); |
2aa34383 DK |
2278 | (void) refcount_add_many(&state->arcs_esize[type], |
2279 | HDR_GET_LSIZE(hdr), hdr); | |
d3c2ae1c GW |
2280 | return; |
2281 | } | |
2282 | ||
2283 | ASSERT(!GHOST_STATE(state)); | |
a6255b7f | 2284 | if (hdr->b_l1hdr.b_pabd != NULL) { |
d3c2ae1c GW |
2285 | (void) refcount_add_many(&state->arcs_esize[type], |
2286 | arc_hdr_size(hdr), hdr); | |
2287 | } | |
b5256303 TC |
2288 | if (HDR_HAS_RABD(hdr)) { |
2289 | (void) refcount_add_many(&state->arcs_esize[type], | |
2290 | HDR_GET_PSIZE(hdr), hdr); | |
2291 | } | |
2292 | ||
d3c2ae1c | 2293 | for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) { |
2aa34383 | 2294 | if (arc_buf_is_shared(buf)) |
d3c2ae1c | 2295 | continue; |
2aa34383 DK |
2296 | (void) refcount_add_many(&state->arcs_esize[type], |
2297 | arc_buf_size(buf), buf); | |
d3c2ae1c GW |
2298 | } |
2299 | } | |
2300 | ||
2301 | /* | |
2302 | * Decrement the amount of evictable space in the arc_state_t's refcount. | |
2303 | * We account for the space used by the hdr and the arc buf individually | |
2304 | * so that we can add and remove them from the refcount individually. | |
2305 | */ | |
2306 | static void | |
2aa34383 | 2307 | arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state) |
d3c2ae1c GW |
2308 | { |
2309 | arc_buf_contents_t type = arc_buf_type(hdr); | |
d3c2ae1c GW |
2310 | arc_buf_t *buf; |
2311 | ||
2312 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
2313 | ||
2314 | if (GHOST_STATE(state)) { | |
2315 | ASSERT0(hdr->b_l1hdr.b_bufcnt); | |
2316 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); | |
a6255b7f | 2317 | ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); |
b5256303 | 2318 | ASSERT(!HDR_HAS_RABD(hdr)); |
d3c2ae1c | 2319 | (void) refcount_remove_many(&state->arcs_esize[type], |
2aa34383 | 2320 | HDR_GET_LSIZE(hdr), hdr); |
d3c2ae1c GW |
2321 | return; |
2322 | } | |
2323 | ||
2324 | ASSERT(!GHOST_STATE(state)); | |
a6255b7f | 2325 | if (hdr->b_l1hdr.b_pabd != NULL) { |
d3c2ae1c GW |
2326 | (void) refcount_remove_many(&state->arcs_esize[type], |
2327 | arc_hdr_size(hdr), hdr); | |
2328 | } | |
b5256303 TC |
2329 | if (HDR_HAS_RABD(hdr)) { |
2330 | (void) refcount_remove_many(&state->arcs_esize[type], | |
2331 | HDR_GET_PSIZE(hdr), hdr); | |
2332 | } | |
2333 | ||
d3c2ae1c | 2334 | for (buf = hdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) { |
2aa34383 | 2335 | if (arc_buf_is_shared(buf)) |
d3c2ae1c | 2336 | continue; |
d3c2ae1c | 2337 | (void) refcount_remove_many(&state->arcs_esize[type], |
2aa34383 | 2338 | arc_buf_size(buf), buf); |
d3c2ae1c GW |
2339 | } |
2340 | } | |
2341 | ||
2342 | /* | |
2343 | * Add a reference to this hdr indicating that someone is actively | |
2344 | * referencing that memory. When the refcount transitions from 0 to 1, | |
2345 | * we remove it from the respective arc_state_t list to indicate that | |
2346 | * it is not evictable. | |
2347 | */ | |
2348 | static void | |
2349 | add_reference(arc_buf_hdr_t *hdr, void *tag) | |
34dc7c2f | 2350 | { |
b9541d6b CW |
2351 | arc_state_t *state; |
2352 | ||
2353 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
d3c2ae1c GW |
2354 | if (!MUTEX_HELD(HDR_LOCK(hdr))) { |
2355 | ASSERT(hdr->b_l1hdr.b_state == arc_anon); | |
2356 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
2357 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); | |
2358 | } | |
34dc7c2f | 2359 | |
b9541d6b CW |
2360 | state = hdr->b_l1hdr.b_state; |
2361 | ||
2362 | if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && | |
2363 | (state != arc_anon)) { | |
2364 | /* We don't use the L2-only state list. */ | |
2365 | if (state != arc_l2c_only) { | |
64fc7762 | 2366 | multilist_remove(state->arcs_list[arc_buf_type(hdr)], |
d3c2ae1c | 2367 | hdr); |
2aa34383 | 2368 | arc_evictable_space_decrement(hdr, state); |
34dc7c2f | 2369 | } |
b128c09f | 2370 | /* remove the prefetch flag if we get a reference */ |
d3c2ae1c | 2371 | arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); |
34dc7c2f BB |
2372 | } |
2373 | } | |
2374 | ||
d3c2ae1c GW |
2375 | /* |
2376 | * Remove a reference from this hdr. When the reference transitions from | |
2377 | * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's | |
2378 | * list making it eligible for eviction. | |
2379 | */ | |
34dc7c2f | 2380 | static int |
2a432414 | 2381 | remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) |
34dc7c2f BB |
2382 | { |
2383 | int cnt; | |
b9541d6b | 2384 | arc_state_t *state = hdr->b_l1hdr.b_state; |
34dc7c2f | 2385 | |
b9541d6b | 2386 | ASSERT(HDR_HAS_L1HDR(hdr)); |
34dc7c2f BB |
2387 | ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); |
2388 | ASSERT(!GHOST_STATE(state)); | |
2389 | ||
b9541d6b CW |
2390 | /* |
2391 | * arc_l2c_only counts as a ghost state so we don't need to explicitly | |
2392 | * check to prevent usage of the arc_l2c_only list. | |
2393 | */ | |
2394 | if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && | |
34dc7c2f | 2395 | (state != arc_anon)) { |
64fc7762 | 2396 | multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr); |
d3c2ae1c GW |
2397 | ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); |
2398 | arc_evictable_space_increment(hdr, state); | |
34dc7c2f BB |
2399 | } |
2400 | return (cnt); | |
2401 | } | |
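/*
 * Net effect of the two functions above: the first add_reference() on
 * a cached hdr pulls it off its state's multilist (held buffers are
 * not evictable), and the remove_reference() that drops the count back
 * to zero re-inserts it, making the hdr eligible for eviction again.
 */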
2402 | ||
e0b0ca98 BB |
2403 | /* |
2404 | * Returns detailed information about a specific arc buffer. When the | |
2405 | * state_index argument is set, the function will calculate the arc header
2406 | * list position for its arc state. Since this requires a linear traversal,
2407 | * callers are strongly encouraged not to do this. However, it can be helpful
2408 | * for targeted analysis so the functionality is provided. | |
2409 | */ | |
2410 | void | |
2411 | arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index) | |
2412 | { | |
2413 | arc_buf_hdr_t *hdr = ab->b_hdr; | |
b9541d6b CW |
2414 | l1arc_buf_hdr_t *l1hdr = NULL; |
2415 | l2arc_buf_hdr_t *l2hdr = NULL; | |
2416 | arc_state_t *state = NULL; | |
2417 | ||
8887c7d7 TC |
2418 | memset(abi, 0, sizeof (arc_buf_info_t)); |
2419 | ||
2420 | if (hdr == NULL) | |
2421 | return; | |
2422 | ||
2423 | abi->abi_flags = hdr->b_flags; | |
2424 | ||
b9541d6b CW |
2425 | if (HDR_HAS_L1HDR(hdr)) { |
2426 | l1hdr = &hdr->b_l1hdr; | |
2427 | state = l1hdr->b_state; | |
2428 | } | |
2429 | if (HDR_HAS_L2HDR(hdr)) | |
2430 | l2hdr = &hdr->b_l2hdr; | |
e0b0ca98 | 2431 | |
b9541d6b | 2432 | if (l1hdr) { |
d3c2ae1c | 2433 | abi->abi_bufcnt = l1hdr->b_bufcnt; |
b9541d6b CW |
2434 | abi->abi_access = l1hdr->b_arc_access; |
2435 | abi->abi_mru_hits = l1hdr->b_mru_hits; | |
2436 | abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits; | |
2437 | abi->abi_mfu_hits = l1hdr->b_mfu_hits; | |
2438 | abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits; | |
2439 | abi->abi_holds = refcount_count(&l1hdr->b_refcnt); | |
2440 | } | |
2441 | ||
2442 | if (l2hdr) { | |
2443 | abi->abi_l2arc_dattr = l2hdr->b_daddr; | |
b9541d6b CW |
2444 | abi->abi_l2arc_hits = l2hdr->b_hits; |
2445 | } | |
2446 | ||
e0b0ca98 | 2447 | abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON; |
b9541d6b | 2448 | abi->abi_state_contents = arc_buf_type(hdr); |
d3c2ae1c | 2449 | abi->abi_size = arc_hdr_size(hdr); |
e0b0ca98 BB |
2450 | } |
2451 | ||
34dc7c2f | 2452 | /* |
ca0bf58d | 2453 | * Move the supplied buffer to the indicated state. The hash lock |
34dc7c2f BB |
2454 | * for the buffer must be held by the caller. |
2455 | */ | |
2456 | static void | |
2a432414 GW |
2457 | arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, |
2458 | kmutex_t *hash_lock) | |
34dc7c2f | 2459 | { |
	arc_state_t *old_state;
	int64_t refcnt;
	uint32_t bufcnt;
	boolean_t update_old, update_new;
	arc_buf_contents_t buftype = arc_buf_type(hdr);

	/*
	 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
	 * in arc_read() when bringing a buffer out of the L2ARC. However, the
	 * L1 hdr doesn't always exist when we change state to arc_anon before
	 * destroying a header, in which case reallocating to add the L1 hdr is
	 * pointless.
	 */
	if (HDR_HAS_L1HDR(hdr)) {
		old_state = hdr->b_l1hdr.b_state;
		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
		bufcnt = hdr->b_l1hdr.b_bufcnt;
		update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
		    HDR_HAS_RABD(hdr));
	} else {
		old_state = arc_l2c_only;
		refcnt = 0;
		bufcnt = 0;
		update_old = B_FALSE;
	}
	update_new = update_old;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT3P(new_state, !=, old_state);
	ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
	ASSERT(old_state != arc_anon || bufcnt <= 1);

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon && old_state != arc_l2c_only) {
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_remove(old_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(old_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_old = B_TRUE;
			}
			arc_evictable_space_decrement(hdr, old_state);
		}
		if (new_state != arc_anon && new_state != arc_l2c_only) {
			/*
			 * An L1 header always exists here, since if we're
			 * moving to some L1-cached state (i.e. not l2c_only or
			 * anonymous), we realloc the header to add an L1hdr
			 * beforehand.
			 */
			ASSERT(HDR_HAS_L1HDR(hdr));
			multilist_insert(new_state->arcs_list[buftype], hdr);

			if (GHOST_STATE(new_state)) {
				ASSERT0(bufcnt);
				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
				update_new = B_TRUE;
			}
			arc_evictable_space_increment(hdr, new_state);
		}
	}

	ASSERT(!HDR_EMPTY(hdr));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
		buf_hash_remove(hdr);

	/* adjust state sizes (ignore arc_l2c_only) */

	if (update_new && new_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(new_state)) {
			ASSERT0(bufcnt);

			/*
			 * When moving a header to a ghost state, we first
			 * remove all arc buffers. Thus, we'll have a
			 * bufcnt of zero, and no arc buffer to use for
			 * the reference. As a result, we use the arc
			 * header pointer for the reference.
			 */
			(void) refcount_add_many(&new_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(!HDR_HAS_RABD(hdr));
		} else {
			arc_buf_t *buf;
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) refcount_add_many(&new_state->arcs_size,
				    arc_buf_size(buf), buf);
			}
			ASSERT3U(bufcnt, ==, buffers);

			if (hdr->b_l1hdr.b_pabd != NULL) {
				(void) refcount_add_many(&new_state->arcs_size,
				    arc_hdr_size(hdr), hdr);
			}

			if (HDR_HAS_RABD(hdr)) {
				(void) refcount_add_many(&new_state->arcs_size,
				    HDR_GET_PSIZE(hdr), hdr);
			}
		}
	}

	if (update_old && old_state != arc_l2c_only) {
		ASSERT(HDR_HAS_L1HDR(hdr));
		if (GHOST_STATE(old_state)) {
			ASSERT0(bufcnt);
			ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
			ASSERT(!HDR_HAS_RABD(hdr));

			/*
			 * When moving a header off of a ghost state,
			 * the header will not contain any arc buffers.
			 * We use the arc header pointer for the reference
			 * which is exactly what we did when we put the
			 * header on the ghost state.
			 */

			(void) refcount_remove_many(&old_state->arcs_size,
			    HDR_GET_LSIZE(hdr), hdr);
		} else {
			arc_buf_t *buf;
			uint32_t buffers = 0;

			/*
			 * Each individual buffer holds a unique reference,
			 * thus we must remove each of these references one
			 * at a time.
			 */
			for (buf = hdr->b_l1hdr.b_buf; buf != NULL;
			    buf = buf->b_next) {
				ASSERT3U(bufcnt, !=, 0);
				buffers++;

				/*
				 * When the arc_buf_t is sharing the data
				 * block with the hdr, the owner of the
				 * reference belongs to the hdr. Only
				 * add to the refcount if the arc_buf_t is
				 * not shared.
				 */
				if (arc_buf_is_shared(buf))
					continue;

				(void) refcount_remove_many(
				    &old_state->arcs_size, arc_buf_size(buf),
				    buf);
			}
			ASSERT3U(bufcnt, ==, buffers);
			ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
			    HDR_HAS_RABD(hdr));

			if (hdr->b_l1hdr.b_pabd != NULL) {
				(void) refcount_remove_many(
				    &old_state->arcs_size, arc_hdr_size(hdr),
				    hdr);
			}

			if (HDR_HAS_RABD(hdr)) {
				(void) refcount_remove_many(
				    &old_state->arcs_size, HDR_GET_PSIZE(hdr),
				    hdr);
			}
		}
	}

	if (HDR_HAS_L1HDR(hdr))
		hdr->b_l1hdr.b_state = new_state;

	/*
	 * L2 headers should never be on the L2 state list since they don't
	 * have L1 headers allocated.
	 */
	ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
	    multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
}
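
/*
 * Editor's sketch (not part of the original source): the refcount "tag"
 * convention used by arc_change_state(). On a ghost list a header has no
 * arc_buf_t's, so the header pointer itself serves as the refcount tag;
 * otherwise each unshared buf tags its own bytes while the hdr tags its
 * b_pabd/b_rabd bytes. A ghost-state round trip therefore pairs up
 * symmetrically:
 *
 *	(void) refcount_add_many(&new_state->arcs_size,
 *	    HDR_GET_LSIZE(hdr), hdr);
 *	...
 *	(void) refcount_remove_many(&old_state->arcs_size,
 *	    HDR_GET_LSIZE(hdr), hdr);
 *
 * Removing with a different tag than was added would trip the reference
 * tracking checks, which is why both halves of the move use the hdr pointer.
 */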

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_META:
		ARCSTAT_INCR(arcstat_metadata_size, space);
		break;
	case ARC_SPACE_BONUS:
		ARCSTAT_INCR(arcstat_bonus_size, space);
		break;
	case ARC_SPACE_DNODE:
		ARCSTAT_INCR(arcstat_dnode_size, space);
		break;
	case ARC_SPACE_DBUF:
		ARCSTAT_INCR(arcstat_dbuf_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	if (type != ARC_SPACE_DATA)
		ARCSTAT_INCR(arcstat_meta_used, space);

	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	default:
		break;
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_META:
		ARCSTAT_INCR(arcstat_metadata_size, -space);
		break;
	case ARC_SPACE_BONUS:
		ARCSTAT_INCR(arcstat_bonus_size, -space);
		break;
	case ARC_SPACE_DNODE:
		ARCSTAT_INCR(arcstat_dnode_size, -space);
		break;
	case ARC_SPACE_DBUF:
		ARCSTAT_INCR(arcstat_dbuf_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	if (type != ARC_SPACE_DATA) {
		ASSERT(arc_meta_used >= space);
		if (arc_meta_max < arc_meta_used)
			arc_meta_max = arc_meta_used;
		ARCSTAT_INCR(arcstat_meta_used, -space);
	}

	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}
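
/*
 * Editor's sketch (not part of the original source): consumers are expected
 * to pair these calls with the same type and size. A hypothetical caller
 * accounting for a dnode allocation would do:
 *
 *	arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
 *	...
 *	arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
 *
 * Every type other than ARC_SPACE_DATA also rolls up into arcstat_meta_used,
 * so an unbalanced pair would skew both the per-type kstat and the metadata
 * total that the ARC's metadata-limit logic steers by.
 */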

/*
 * Given a hdr and a buf, returns whether that buf can share its b_data buffer
 * with the hdr's b_pabd.
 */
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	/*
	 * The criteria for sharing a hdr's data are:
	 * 1. the buffer is not encrypted
	 * 2. the hdr's compression matches the buf's compression
	 * 3. the hdr doesn't need to be byteswapped
	 * 4. the hdr isn't already being shared
	 * 5. the buf is either compressed or it is the last buf in the hdr list
	 *
	 * Criterion #5 maintains the invariant that shared uncompressed
	 * bufs must be the final buf in the hdr's b_buf list. Reading this, you
	 * might ask, "if a compressed buf is allocated first, won't that be the
	 * last thing in the list?", but in that case it's impossible to create
	 * a shared uncompressed buf anyway (because the hdr must be compressed
	 * to have the compressed buf). You might also think that #3 is
	 * sufficient to make this guarantee, however it's possible
	 * (specifically in the rare L2ARC write race mentioned in
	 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that
	 * is sharable, but wasn't at the time of its allocation. Rather than
	 * allow a new shared uncompressed buf to be created and then shuffle
	 * the list around to make it the last element, this simply disallows
	 * sharing if the new buf isn't the first to be added.
	 */
	ASSERT3P(buf->b_hdr, ==, hdr);
	boolean_t hdr_compressed =
	    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
	boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
	return (!ARC_BUF_ENCRYPTED(buf) &&
	    buf_compressed == hdr_compressed &&
	    hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
	    !HDR_SHARED_DATA(hdr) &&
	    (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
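
/*
 * Editor's worked example (not part of the original source): an
 * uncompressed buf of an uncompressed, unshared hdr that is the only buf on
 * the list satisfies criteria 1-5 and may share. A second uncompressed buf
 * then added to the same hdr fails criterion #5 (it is neither compressed
 * nor last in the list), so arc_buf_alloc_impl() gives it its own b_data
 * copy instead of sharing b_pabd.
 */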

/*
 * Allocate a buf for this hdr. If you care about the data that's in the hdr,
 * or if you want a compressed buffer, pass those flags in. Returns 0 if the
 * copy was made successfully, or an error code otherwise.
 */
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj, void *tag,
    boolean_t encrypted, boolean_t compressed, boolean_t noauth,
    boolean_t fill, arc_buf_t **ret)
{
	arc_buf_t *buf;
	arc_fill_flags_t flags = ARC_FILL_LOCKED;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	VERIFY(hdr->b_type == ARC_BUFC_DATA ||
	    hdr->b_type == ARC_BUFC_METADATA);
	ASSERT3P(ret, !=, NULL);
	ASSERT3P(*ret, ==, NULL);
	IMPLY(encrypted, compressed);

	hdr->b_l1hdr.b_mru_hits = 0;
	hdr->b_l1hdr.b_mru_ghost_hits = 0;
	hdr->b_l1hdr.b_mfu_hits = 0;
	hdr->b_l1hdr.b_mfu_ghost_hits = 0;
	hdr->b_l1hdr.b_l2_hits = 0;

	buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_next = hdr->b_l1hdr.b_buf;
	buf->b_flags = 0;

	add_reference(hdr, tag);

	/*
	 * We're about to change the hdr's b_flags. We must either
	 * hold the hash_lock or be undiscoverable.
	 */
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Only honor requests for compressed bufs if the hdr is actually
	 * compressed. This must be overridden if the buffer is encrypted since
	 * encrypted buffers cannot be decompressed.
	 */
	if (encrypted) {
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
		flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
	} else if (compressed &&
	    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
		buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
		flags |= ARC_FILL_COMPRESSED;
	}

	if (noauth) {
		ASSERT0(encrypted);
		flags |= ARC_FILL_NOAUTH;
	}

	/*
	 * If the hdr's data can be shared then we share the data buffer and
	 * set the appropriate bit in the hdr's b_flags to indicate the hdr is
	 * sharing its b_data with the arc_buf_t; otherwise we allocate a new
	 * buffer to store the buf's data.
	 *
	 * There are two additional restrictions here because we're sharing
	 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
	 * actively involved in an L2ARC write, because if this buf is used by
	 * an arc_write() then the hdr's data buffer will be released when the
	 * write completes, even though the L2ARC write might still be using it.
	 * Second, the hdr's ABD must be linear so that the buf's user doesn't
	 * need to be ABD-aware.
	 */
	boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
	    hdr->b_l1hdr.b_pabd != NULL && abd_is_linear(hdr->b_l1hdr.b_pabd);

	/* Set up b_data and sharing */
	if (can_share) {
		buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
		buf->b_flags |= ARC_BUF_FLAG_SHARED;
		arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	} else {
		buf->b_data =
		    arc_get_data_buf(hdr, arc_buf_size(buf), buf);
		ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
	}
	VERIFY3P(buf->b_data, !=, NULL);

	hdr->b_l1hdr.b_buf = buf;
	hdr->b_l1hdr.b_bufcnt += 1;
	if (encrypted)
		hdr->b_crypt_hdr.b_ebufcnt += 1;

	/*
	 * If the user wants the data from the hdr, we need to either copy or
	 * decompress the data.
	 */
	if (fill) {
		return (arc_buf_fill(buf, spa, dsobj, flags));
	}

	return (0);
}
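
/*
 * Editor's sketch (not part of the original source): how the boolean
 * arguments above map onto b_flags and fill flags, assuming a protected,
 * compressed hdr:
 *
 *	encrypted=B_TRUE  -> ARC_BUF_FLAG_COMPRESSED|ARC_BUF_FLAG_ENCRYPTED,
 *			     fill with ARC_FILL_COMPRESSED|ARC_FILL_ENCRYPTED
 *	compressed=B_TRUE -> ARC_BUF_FLAG_COMPRESSED and ARC_FILL_COMPRESSED
 *			     (honored only if the hdr itself is compressed)
 *	neither           -> a plain uncompressed copy, or a share of b_pabd
 *			     when arc_can_share() and the L2ARC/ABD checks
 *			     allow it
 */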

static char *arc_onloan_tag = "onloan";

static inline void
arc_loaned_bytes_update(int64_t delta)
{
	atomic_add_64(&arc_loaned_bytes, delta);

	/* assert that it did not wrap around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
	arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
	    is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);

	arc_loaned_bytes_update(size);

	return (buf);
}

arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
	    psize, lsize, compression_type);

	arc_loaned_bytes_update(psize);

	return (buf);
}

arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
	    byteorder, salt, iv, mac, ot, psize, lsize, compression_type);

	atomic_add_64(&arc_loaned_bytes, psize);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);

	arc_loaned_bytes_update(-arc_buf_size(buf));
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(buf->b_data, !=, NULL);
	ASSERT(HDR_HAS_L1HDR(hdr));
	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);

	arc_loaned_bytes_update(arc_buf_size(buf));
}
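
/*
 * Editor's usage sketch (not part of the original source): a typical loan
 * lifecycle as seen from a hypothetical DMU-style consumer, where 'tag' is
 * an illustrative owner such as a dbuf:
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, B_FALSE, size);
 *	bcopy(src, abuf->b_data, size);
 *	...hand abuf to the consumer...
 *	arc_return_buf(abuf, tag);
 *
 * arc_loan_inuse_buf() is the other direction: it re-loans a buf that a
 * consumer already owns, swapping the owner's refcount tag back to
 * arc_onloan_tag. arc_loaned_bytes rises on loan and falls on return, so a
 * leak of loaned buffers shows up directly in that counter.
 */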

static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
	l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);

	df->l2df_abd = abd;
	df->l2df_size = size;
	df->l2df_type = type;
	mutex_enter(&l2arc_free_on_write_mtx);
	list_insert_head(l2arc_free_on_write, df);
	mutex_exit(&l2arc_free_on_write_mtx);
}

static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
	arc_state_t *state = hdr->b_l1hdr.b_state;
	arc_buf_contents_t type = arc_buf_type(hdr);
	uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);

	/* protected by hash lock, if in the hash table */
	if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT(state != arc_anon && state != arc_l2c_only);

		(void) refcount_remove_many(&state->arcs_esize[type],
		    size, hdr);
	}
	(void) refcount_remove_many(&state->arcs_size, size, hdr);
	if (type == ARC_BUFC_METADATA) {
		arc_space_return(size, ARC_SPACE_META);
	} else {
		ASSERT(type == ARC_BUFC_DATA);
		arc_space_return(size, ARC_SPACE_DATA);
	}

	if (free_rdata) {
		l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
	} else {
		l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
	}
}

/*
 * Share the arc_buf_t's data with the hdr. Whenever we are sharing the
 * data buffer, we transfer the refcount ownership to the hdr and update
 * the appropriate kstats.
 */
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(arc_can_share(hdr, buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(!ARC_BUF_ENCRYPTED(buf));
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * Start sharing the data buffer. We transfer the
	 * refcount ownership to the hdr since it always owns
	 * the refcount whenever an arc_buf_t is shared.
	 */
	refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
	hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
	abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
	    HDR_ISTYPE_METADATA(hdr));
	arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
	buf->b_flags |= ARC_BUF_FLAG_SHARED;

	/*
	 * Since we've transferred ownership to the hdr we need
	 * to increment its compressed and uncompressed kstats and
	 * decrement the overhead size.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}

static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	/*
	 * We are no longer sharing this buffer so we need
	 * to transfer its ownership to the rightful owner.
	 */
	refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
	arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
	abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
	abd_put(hdr->b_l1hdr.b_pabd);
	hdr->b_l1hdr.b_pabd = NULL;
	buf->b_flags &= ~ARC_BUF_FLAG_SHARED;

	/*
	 * Since the buffer is no longer shared between
	 * the arc buf and the hdr, count it as overhead.
	 */
	ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
	ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
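
/*
 * Editor's note (not part of the original source): these two functions are
 * exact inverses. A share moves ownership buf -> hdr and shifts the bytes
 * from arcstat_overhead_size into the compressed/uncompressed totals; an
 * unshare moves ownership hdr -> buf and shifts the same bytes back. For an
 * uncompressed 128K buf, for example, a share decrements overhead by 128K
 * and increments both size kstats by 128K, so the net accounted footprint
 * is unchanged; only which object owns the bytes changes.
 */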

/*
 * Remove an arc_buf_t from the hdr's buf list and return the last
 * arc_buf_t on the list. If no buffers remain on the list then return
 * NULL.
 */
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

	arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
	arc_buf_t *lastbuf = NULL;

	/*
	 * Remove the buf from the hdr list and locate the last
	 * remaining buffer on the list.
	 */
	while (*bufp != NULL) {
		if (*bufp == buf)
			*bufp = buf->b_next;

		/*
		 * If we've removed a buffer in the middle of
		 * the list then update the lastbuf and update
		 * bufp.
		 */
		if (*bufp != NULL) {
			lastbuf = *bufp;
			bufp = &(*bufp)->b_next;
		}
	}
	buf->b_next = NULL;
	ASSERT3P(lastbuf, !=, buf);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
	IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
	IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));

	return (lastbuf);
}
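
/*
 * Editor's worked example (not part of the original source): given a
 * hypothetical list hdr->buf_a->buf_b->buf_c, arc_buf_remove(hdr, buf_b)
 * unlinks buf_b and returns buf_c (the last remaining buf); removing buf_c
 * from the original three instead leaves buf_a->buf_b and returns buf_b.
 * Only when the final buf is removed does the function return NULL, which
 * is what the bufcnt IMPLY assertions above encode.
 */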

/*
 * Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
 * list and free it.
 */
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	/*
	 * Free up the data associated with the buf but only if we're not
	 * sharing this with the hdr. If we are sharing it with the hdr, the
	 * hdr is responsible for doing the free.
	 */
	if (buf->b_data != NULL) {
		/*
		 * We're about to change the hdr's b_flags. We must either
		 * hold the hash_lock or be undiscoverable.
		 */
		ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));

		arc_cksum_verify(buf);
		arc_buf_unwatch(buf);

		if (arc_buf_is_shared(buf)) {
			arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
		} else {
			uint64_t size = arc_buf_size(buf);
			arc_free_data_buf(hdr, buf->b_data, size, buf);
			ARCSTAT_INCR(arcstat_overhead_size, -size);
		}
		buf->b_data = NULL;

		ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
		hdr->b_l1hdr.b_bufcnt -= 1;

		if (ARC_BUF_ENCRYPTED(buf))
			hdr->b_crypt_hdr.b_ebufcnt -= 1;

		/*
		 * If we have no more encrypted buffers and we've already
		 * gotten a copy of the decrypted data we can free b_rabd
		 * to save some space.
		 */
		if (hdr->b_crypt_hdr.b_ebufcnt == 0 && HDR_HAS_RABD(hdr) &&
		    hdr->b_l1hdr.b_pabd != NULL)
			arc_hdr_free_abd(hdr, B_TRUE);
	}

	arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);

	if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
		/*
		 * If the current arc_buf_t is sharing its data buffer with the
		 * hdr, then reassign the hdr's b_pabd to share it with the new
		 * buffer at the end of the list. The shared buffer is always
		 * the last one on the hdr's buffer list.
		 *
		 * There is an equivalent case for compressed bufs, but since
		 * they aren't guaranteed to be the last buf in the list and
		 * that is an exceedingly rare case, we just allow that space
		 * to be wasted temporarily. We must also be careful not to
		 * share encrypted buffers, since they cannot be shared.
		 */
		if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
			/* Only one buf can be shared at once */
			VERIFY(!arc_buf_is_shared(lastbuf));
			/* hdr is uncompressed so can't have compressed buf */
			VERIFY(!ARC_BUF_COMPRESSED(lastbuf));

			ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
			arc_hdr_free_abd(hdr, B_FALSE);

			/*
			 * We must setup a new shared block between the
			 * last buffer and the hdr. The data would have
			 * been allocated by the arc buf so we need to transfer
			 * ownership to the hdr since it's now being shared.
			 */
			arc_share_buf(hdr, lastbuf);
		}
	} else if (HDR_SHARED_DATA(hdr)) {
		/*
		 * Uncompressed shared buffers are always at the end
		 * of the list. Compressed buffers don't have the
		 * same requirements. This makes it hard to
		 * simply assert that the lastbuf is shared so
		 * we rely on the hdr's compression flags to determine
		 * if we have a compressed, shared buffer.
		 */
		ASSERT3P(lastbuf, !=, NULL);
		ASSERT(arc_buf_is_shared(lastbuf) ||
		    arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
	}

	/*
	 * Free the checksum if we're removing the last uncompressed buf from
	 * this hdr.
	 */
	if (!arc_hdr_has_uncompressed_buf(hdr)) {
		arc_cksum_free(hdr);
	}

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
{
	uint64_t size;

	ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
	IMPLY(alloc_rdata, HDR_PROTECTED(hdr));

	if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	if (alloc_rdata) {
		size = HDR_GET_PSIZE(hdr);
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
		hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr);
		ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
		ARCSTAT_INCR(arcstat_raw_size, size);
	} else {
		size = arc_hdr_size(hdr);
		ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr);
		ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
	}

	ARCSTAT_INCR(arcstat_compressed_size, size);
	ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}

static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
	uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
	IMPLY(free_rdata, HDR_HAS_RABD(hdr));

	/*
	 * If the hdr is currently being written to the l2arc then
	 * we defer freeing the data by adding it to the l2arc_free_on_write
	 * list. The l2arc will free the data once it's finished
	 * writing it to the l2arc device.
	 */
	if (HDR_L2_WRITING(hdr)) {
		arc_hdr_free_on_write(hdr, free_rdata);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else if (free_rdata) {
		arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
	} else {
		arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
	}

	if (free_rdata) {
		hdr->b_crypt_hdr.b_rabd = NULL;
		ARCSTAT_INCR(arcstat_raw_size, -size);
	} else {
		hdr->b_l1hdr.b_pabd = NULL;
	}

	if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
		hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;

	ARCSTAT_INCR(arcstat_compressed_size, -size);
	ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
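
/*
 * Editor's note (not part of the original source): the two ABDs are sized
 * differently on purpose. b_rabd always holds raw (possibly encrypted)
 * on-disk bytes, so it is allocated and accounted at HDR_GET_PSIZE(hdr);
 * b_pabd is accounted at arc_hdr_size(hdr), i.e. psize when the hdr is
 * compressed and lsize when it is not. A hypothetical 128K block compressed
 * to 16K and encrypted could therefore carry a 16K b_rabd, a 16K b_pabd,
 * or both at once while a decrypted copy is in use.
 */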

static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
    boolean_t protected, enum zio_compress compression_type,
    arc_buf_contents_t type, boolean_t alloc_rdata)
{
	arc_buf_hdr_t *hdr;

	VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
	if (protected) {
		hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
	} else {
		hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
	}

	ASSERT(HDR_EMPTY(hdr));
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
	HDR_SET_PSIZE(hdr, psize);
	HDR_SET_LSIZE(hdr, lsize);
	hdr->b_spa = spa;
	hdr->b_type = type;
	hdr->b_flags = 0;
	arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
	arc_hdr_set_compress(hdr, compression_type);
	if (protected)
		arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);

	hdr->b_l1hdr.b_state = arc_anon;
	hdr->b_l1hdr.b_arc_access = 0;
	hdr->b_l1hdr.b_bufcnt = 0;
	hdr->b_l1hdr.b_buf = NULL;

	/*
	 * Allocate the hdr's buffer. This will contain either
	 * the compressed or uncompressed data depending on the block
	 * it references and compressed arc enablement.
	 */
	arc_hdr_alloc_abd(hdr, alloc_rdata);
	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));

	return (hdr);
}

/*
 * Transition between the two allocation states for the arc_buf_hdr struct.
 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * version is used when a cache buffer is only in the L2ARC in order to reduce
 * memory usage.
 */
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
	arc_buf_hdr_t *nhdr;
	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;

	ASSERT(HDR_HAS_L2HDR(hdr));
	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
	    (old == hdr_l2only_cache && new == hdr_full_cache));

	/*
	 * If the caller wanted a new full header and the header is to be
	 * encrypted we will actually allocate the header from the full crypt
	 * cache instead. The same applies to freeing from the old cache.
	 */
	if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
		new = hdr_full_crypt_cache;
	if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
		old = hdr_full_crypt_cache;

	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);

	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
	buf_hash_remove(hdr);

	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);

	if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
		arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
		/*
		 * arc_access and arc_change_state need to be aware that a
		 * header has just come out of L2ARC, so we set its state to
		 * l2c_only even though it's about to change.
		 */
		nhdr->b_l1hdr.b_state = arc_l2c_only;

		/* Verify previous threads set to NULL before freeing */
		ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT(!HDR_HAS_RABD(hdr));
	} else {
		ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
		ASSERT0(hdr->b_l1hdr.b_bufcnt);
		ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

		/*
		 * If we've reached here, we must have been called from
		 * arc_evict_hdr(), as such we should have already been
		 * removed from any ghost list we were previously on
		 * (which protects us from racing with arc_evict_state),
		 * thus no locking is needed during this check.
		 */
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		/*
		 * A buffer must not be moved into the arc_l2c_only
		 * state if it's not finished being written out to the
		 * l2arc device. Otherwise, the b_l1hdr.b_pabd field
		 * might try to be accessed, even though it was removed.
		 */
		VERIFY(!HDR_L2_WRITING(hdr));
		VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
		ASSERT(!HDR_HAS_RABD(hdr));

		arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
	}
	/*
	 * The header has been reallocated so we need to re-insert it into any
	 * lists it was on.
	 */
	(void) buf_hash_insert(nhdr, NULL);

	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * We must place the realloc'ed header back into the list at
	 * the same spot. Otherwise, if it's placed earlier in the list,
	 * l2arc_write_buffers() could find it during the function's
	 * write phase, and try to write it out to the l2arc.
	 */
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);

	mutex_exit(&dev->l2ad_mtx);

	/*
	 * Since we're using the pointer address as the tag when
	 * incrementing and decrementing the l2ad_alloc refcount, we
	 * must remove the old pointer (that we're about to destroy) and
	 * add the new pointer to the refcount. Otherwise we'd remove
	 * the wrong pointer address when calling arc_hdr_destroy() later.
	 */

	(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
	(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);

	buf_discard_identity(hdr);
	kmem_cache_free(old, hdr);

	return (nhdr);
}
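
/*
 * Editor's sketch (not part of the original source): both directions of the
 * transition are driven from the eviction and read paths, e.g.:
 *
 *	hdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *
 * drops an L2-resident header to its small L2-only form (as arc_evict_hdr()
 * does when pushing a ghost header to arc_l2c_only), while the reverse
 * argument order restores the L1 portion when a block is read back in from
 * the L2ARC.
 */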

/*
 * This function allows an L1 header to be reallocated as a crypt
 * header and vice versa. If we are going to a crypt header, the
 * new fields will be zeroed out.
 */
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
	arc_buf_hdr_t *nhdr;
	arc_buf_t *buf;
	kmem_cache_t *ncache, *ocache;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

	if (need_crypt) {
		ncache = hdr_full_crypt_cache;
		ocache = hdr_full_cache;
	} else {
		ncache = hdr_full_cache;
		ocache = hdr_full_crypt_cache;
	}

	nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
	nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
	nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
	nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
	nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
	nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
	nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
	nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
	nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
	nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
	nhdr->b_l1hdr.b_l2_hits = hdr->b_l1hdr.b_l2_hits;
	nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
	nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
	nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;

	/*
	 * This refcount_add() exists only to ensure that the individual
	 * arc buffers always point to a header that is referenced, avoiding
	 * a small race condition that could trigger ASSERTs.
	 */
	(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);

	for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
		mutex_enter(&buf->b_evict_lock);
		buf->b_hdr = nhdr;
		mutex_exit(&buf->b_evict_lock);
	}

	refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
	(void) refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);

	if (need_crypt) {
		arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
	} else {
		arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
	}

	buf_discard_identity(hdr);
	kmem_cache_free(ocache, hdr);

	return (nhdr);
}

/*
 * This function is used by the send / receive code to convert a newly
 * allocated arc_buf_t to one that is suitable for a raw encrypted write. It
 * is also used to allow the root objset block to be updated without altering
 * its embedded MACs. Both block types will always be uncompressed so we do not
 * have to worry about compression type or psize.
 */
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
    dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
    const uint8_t *mac)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);

	buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
	if (!HDR_PROTECTED(hdr))
		hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
	if (!arc_hdr_has_uncompressed_buf(hdr))
		arc_cksum_free(hdr);

	if (salt != NULL)
		bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	if (iv != NULL)
		bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	if (mac != NULL)
		bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}

/*
 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
 * The buf is returned thawed since we expect the consumer to modify it.
 */
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
	    B_FALSE, ZIO_COMPRESS_OFF, type, B_FALSE);
	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE, B_FALSE,
	    B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);

	return (buf);
}

/*
 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use
 * this for bufs containing metadata.
 */
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	ASSERT3U(lsize, >, 0);
	ASSERT3U(lsize, >=, psize);
	ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
	ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);

	arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
	    B_FALSE, compression_type, ARC_BUFC_DATA, B_FALSE);
	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));

	arc_buf_t *buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE,
	    B_TRUE, B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	if (!arc_buf_is_shared(buf)) {
		/*
		 * To ensure that the hdr has the correct data in it if we call
		 * arc_untransform() on this buf before it's been written to
		 * disk, it's easiest if we just set up sharing between the
		 * buf and the hdr.
		 */
		ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
		arc_hdr_free_abd(hdr, B_FALSE);
		arc_share_buf(hdr, buf);
	}

	return (buf);
}

arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
    const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
    dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
    enum zio_compress compression_type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
	    ARC_BUFC_METADATA : ARC_BUFC_DATA;

	ASSERT3U(lsize, >, 0);
	ASSERT3U(lsize, >=, psize);
	ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
	ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);

	hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
	    compression_type, type, B_TRUE);
	ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));

	hdr->b_crypt_hdr.b_dsobj = dsobj;
	hdr->b_crypt_hdr.b_ot = ot;
	hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
	    DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
	bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
	bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
	bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);

	/*
	 * This buffer will be considered encrypted even if the ot is not an
	 * encrypted type. It will become authenticated instead in
	 * arc_write_ready().
	 */
	buf = NULL;
	VERIFY0(arc_buf_alloc_impl(hdr, spa, dsobj, tag, B_TRUE, B_TRUE,
	    B_FALSE, B_FALSE, &buf));
	arc_buf_thaw(buf);
	ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);

	return (buf);
}
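
/*
 * Editor's usage sketch (not part of the original source): the three
 * allocators above cover the plain, compressed, and raw/encrypted cases.
 * Hypothetical calls, with sizes chosen only for illustration:
 *
 *	buf = arc_alloc_buf(spa, tag, ARC_BUFC_DATA, 131072);
 *	buf = arc_alloc_compressed_buf(spa, tag, 16384, 131072,
 *	    ZIO_COMPRESS_LZ4);
 *	buf = arc_alloc_raw_buf(spa, tag, dsobj, byteorder, salt, iv, mac,
 *	    DMU_OT_PLAIN_FILE_CONTENTS, 16384, 131072, ZIO_COMPRESS_LZ4);
 *
 * All three return a thawed, anonymous buf whose hdr reference is owned by
 * 'tag', ready for the caller to fill and hand to arc_write().
 */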

static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
	l2arc_dev_t *dev = l2hdr->b_dev;
	uint64_t psize = arc_hdr_size(hdr);

	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
	ASSERT(HDR_HAS_L2HDR(hdr));

	list_remove(&dev->l2ad_buflist, hdr);

	ARCSTAT_INCR(arcstat_l2_psize, -psize);
	ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));

	vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);

	(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
	arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
		    hdr->b_l1hdr.b_bufcnt > 0);
		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
	}
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!HDR_IN_HASH_TABLE(hdr));

	if (!HDR_EMPTY(hdr))
		buf_discard_identity(hdr);

	if (HDR_HAS_L2HDR(hdr)) {
		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);

		if (!buflist_held)
			mutex_enter(&dev->l2ad_mtx);

		/*
		 * Even though we checked this conditional above, we
		 * need to check this again now that we have the
		 * l2ad_mtx. This is because we could be racing with
		 * another thread calling l2arc_evict() which might have
		 * destroyed this header's L2 portion as we were waiting
		 * to acquire the l2ad_mtx. If that happens, we don't
		 * want to re-destroy the header's L2 portion.
		 */
		if (HDR_HAS_L2HDR(hdr))
			arc_hdr_l2hdr_destroy(hdr);

		if (!buflist_held)
			mutex_exit(&dev->l2ad_mtx);
	}

	if (HDR_HAS_L1HDR(hdr)) {
		arc_cksum_free(hdr);

		while (hdr->b_l1hdr.b_buf != NULL)
			arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);

		if (hdr->b_l1hdr.b_pabd != NULL) {
			arc_hdr_free_abd(hdr, B_FALSE);
		}

		if (HDR_HAS_RABD(hdr)) {
			arc_hdr_free_abd(hdr, B_TRUE);
		}
	}

	ASSERT3P(hdr->b_hash_next, ==, NULL);
	if (HDR_HAS_L1HDR(hdr)) {
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);

		if (!HDR_PROTECTED(hdr)) {
			kmem_cache_free(hdr_full_cache, hdr);
		} else {
			kmem_cache_free(hdr_full_crypt_cache, hdr);
		}
	} else {
		kmem_cache_free(hdr_l2only_cache, hdr);
	}
}

void
arc_buf_destroy(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);

	if (hdr->b_l1hdr.b_state == arc_anon) {
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		VERIFY0(remove_reference(hdr, NULL, tag));
		arc_hdr_destroy(hdr);
		return;
	}

	mutex_enter(hash_lock);
	ASSERT3P(hdr, ==, buf->b_hdr);
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
	ASSERT3P(buf->b_data, !=, NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	arc_buf_destroy_impl(buf);
	mutex_exit(hash_lock);
}
3764 | ||
34dc7c2f | 3765 | /* |
ca0bf58d PS |
3766 | * Evict the arc_buf_hdr that is provided as a parameter. The resultant |
3767 | * state of the header is dependent on its state prior to entering this | |
3768 | * function. The following transitions are possible: | |
34dc7c2f | 3769 | * |
ca0bf58d PS |
3770 | * - arc_mru -> arc_mru_ghost |
3771 | * - arc_mfu -> arc_mfu_ghost | |
3772 | * - arc_mru_ghost -> arc_l2c_only | |
3773 | * - arc_mru_ghost -> deleted | |
3774 | * - arc_mfu_ghost -> arc_l2c_only | |
3775 | * - arc_mfu_ghost -> deleted | |
34dc7c2f | 3776 | */ |
ca0bf58d PS |
3777 | static int64_t |
3778 | arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) | |
34dc7c2f | 3779 | { |
ca0bf58d PS |
3780 | arc_state_t *evicted_state, *state; |
3781 | int64_t bytes_evicted = 0; | |
34dc7c2f | 3782 | |
ca0bf58d PS |
3783 | ASSERT(MUTEX_HELD(hash_lock)); |
3784 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
e8b96c60 | 3785 | |
ca0bf58d PS |
3786 | state = hdr->b_l1hdr.b_state; |
3787 | if (GHOST_STATE(state)) { | |
3788 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
d3c2ae1c | 3789 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); |
e8b96c60 MA |
3790 | |
3791 | /* | |
ca0bf58d | 3792 | * l2arc_write_buffers() relies on a header's L1 portion |
a6255b7f | 3793 | * (i.e. its b_pabd field) during it's write phase. |
ca0bf58d PS |
3794 | * Thus, we cannot push a header onto the arc_l2c_only |
		 * state (removing its L1 piece) until the header is
		 * done being written to the l2arc.
		 */
		if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
			ARCSTAT_BUMP(arcstat_evict_l2_skip);
			return (bytes_evicted);
		}

		ARCSTAT_BUMP(arcstat_deleted);
		bytes_evicted += HDR_GET_LSIZE(hdr);

		DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);

		if (HDR_HAS_L2HDR(hdr)) {
			ASSERT(hdr->b_l1hdr.b_pabd == NULL);
			ASSERT(!HDR_HAS_RABD(hdr));
			/*
			 * This buffer is cached on the 2nd Level ARC;
			 * don't destroy the header.
			 */
			arc_change_state(arc_l2c_only, hdr, hash_lock);
			/*
			 * dropping from L1+L2 cached to L2-only,
			 * realloc to remove the L1 header.
			 */
			hdr = arc_hdr_realloc(hdr, hdr_full_cache,
			    hdr_l2only_cache);
		} else {
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		}
		return (bytes_evicted);
	}

	ASSERT(state == arc_mru || state == arc_mfu);
	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	/* prefetch buffers have a minimum lifespan */
	if (HDR_IO_IN_PROGRESS(hdr) ||
	    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
	    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
	    arc_min_prefetch_lifespan)) {
		ARCSTAT_BUMP(arcstat_evict_skip);
		return (bytes_evicted);
	}

	ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
	while (hdr->b_l1hdr.b_buf) {
		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
		if (!mutex_tryenter(&buf->b_evict_lock)) {
			ARCSTAT_BUMP(arcstat_mutex_miss);
			break;
		}
		if (buf->b_data != NULL)
			bytes_evicted += HDR_GET_LSIZE(hdr);
		mutex_exit(&buf->b_evict_lock);
		arc_buf_destroy_impl(buf);
	}

	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
	} else {
		if (l2arc_write_eligible(hdr->b_spa, hdr)) {
			ARCSTAT_INCR(arcstat_evict_l2_eligible,
			    HDR_GET_LSIZE(hdr));
		} else {
			ARCSTAT_INCR(arcstat_evict_l2_ineligible,
			    HDR_GET_LSIZE(hdr));
		}
	}

	if (hdr->b_l1hdr.b_bufcnt == 0) {
		arc_cksum_free(hdr);

		bytes_evicted += arc_hdr_size(hdr);

		/*
		 * If this hdr is being evicted and has a compressed
		 * buffer then we discard it here before we change states.
		 * This ensures that the accounting is updated correctly
		 * in arc_free_data_impl().
		 */
		if (hdr->b_l1hdr.b_pabd != NULL)
			arc_hdr_free_abd(hdr, B_FALSE);

		if (HDR_HAS_RABD(hdr))
			arc_hdr_free_abd(hdr, B_TRUE);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
		DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
	}

	return (bytes_evicted);
}
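
/*
 * Accounting note (worked example with hypothetical sizes, not from the
 * original source): evicting a fully evictable header with one 128K
 * buffer charges HDR_GET_LSIZE(hdr) for the buffer's data, plus
 * arc_hdr_size(hdr) once b_bufcnt drops to zero, so arc_evict_hdr()
 * returns slightly more than 128K. A skipped header (e.g. an L2ARC
 * write or other I/O in progress) returns 0, which the caller uses to
 * avoid counting it against the eviction batch limit.
 */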

static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
    uint64_t spa, int64_t bytes)
{
	multilist_sublist_t *mls;
	uint64_t bytes_evicted = 0;
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	int evict_count = 0;

	ASSERT3P(marker, !=, NULL);
	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);

	mls = multilist_sublist_lock(ml, idx);

	for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
	    hdr = multilist_sublist_prev(mls, marker)) {
		if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
		    (evict_count >= zfs_arc_evict_batch_limit))
			break;

		/*
		 * To keep our iteration location, move the marker
		 * forward. Since we're not holding hdr's hash lock, we
		 * must be very careful and not remove 'hdr' from the
		 * sublist. Otherwise, other consumers might mistake the
		 * 'hdr' as not being on a sublist when they call the
		 * multilist_link_active() function (they all rely on
		 * the hash lock protecting concurrent insertions and
		 * removals). multilist_sublist_move_forward() was
		 * specifically implemented to ensure this is the case
		 * (only 'marker' will be removed and re-inserted).
		 */
		multilist_sublist_move_forward(mls, marker);

		/*
		 * The only case where the b_spa field should ever be
		 * zero is for the marker headers inserted by
		 * arc_evict_state(). It's possible for multiple threads
		 * to be calling arc_evict_state() concurrently (e.g.
		 * dsl_pool_close() and zio_inject_fault()), so we must
		 * skip any markers we see from these other threads.
		 */
		if (hdr->b_spa == 0)
			continue;

		/* we're only interested in evicting buffers of a certain spa */
		if (spa != 0 && hdr->b_spa != spa) {
			ARCSTAT_BUMP(arcstat_evict_skip);
			continue;
		}

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We aren't calling this function from any code path
		 * that would already be holding a hash lock, so we're
		 * asserting on this assumption to be defensive in case
		 * this ever changes. Without this check, it would be
		 * possible to incorrectly increment arcstat_mutex_miss
		 * below (e.g. if the code changed such that we called
		 * this function with a hash lock held).
		 */
		ASSERT(!MUTEX_HELD(hash_lock));

		if (mutex_tryenter(hash_lock)) {
			uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
			mutex_exit(hash_lock);

			bytes_evicted += evicted;

			/*
			 * If evicted is zero, arc_evict_hdr() must have
			 * decided to skip this header; don't increment
			 * evict_count in this case.
			 */
			if (evicted != 0)
				evict_count++;

			/*
			 * If arc_size isn't overflowing, signal any
			 * threads that might happen to be waiting.
			 *
			 * For each header evicted, we wake up a single
			 * thread. If we used cv_broadcast, we could
			 * wake up "too many" threads causing arc_size
			 * to significantly overflow arc_c; since
			 * arc_get_data_impl() doesn't check for overflow
			 * when it's woken up (it doesn't because it's
			 * possible for the ARC to be overflowing while
			 * full of un-evictable buffers, and the
			 * function should proceed in this case).
			 *
			 * If threads are left sleeping, due to not
			 * using cv_broadcast, they will be woken up
			 * just before arc_reclaim_thread() sleeps.
			 */
			mutex_enter(&arc_reclaim_lock);
			if (!arc_is_overflowing())
				cv_signal(&arc_reclaim_waiters_cv);
			mutex_exit(&arc_reclaim_lock);
		} else {
			ARCSTAT_BUMP(arcstat_mutex_miss);
		}
	}

	multilist_sublist_unlock(mls);

	return (bytes_evicted);
}
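
/*
 * Illustrative sketch of the marker walk above (hypothetical sublist
 * state, not from the original source): each iteration examines the
 * neighbor of 'marker' returned by multilist_sublist_prev(), then
 * moves the marker past it with multilist_sublist_move_forward(). The
 * marker therefore records exactly how far this thread's scan has
 * progressed, even though evicted headers vanish from the sublist and
 * other threads' markers (also recognizable by b_spa == 0) may be
 * interleaved with real headers.
 */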

/*
 * Evict buffers from the given arc state, until we've removed the
 * specified number of bytes. Move the removed buffers to the
 * appropriate evict state.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 *
 * If bytes is specified using the special value ARC_EVICT_ALL, this
 * will evict all available (i.e. unlocked and evictable) buffers from
 * the given arc state; which is used by arc_flush().
 */
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
	uint64_t total_evicted = 0;
	multilist_t *ml = state->arcs_list[type];
	int num_sublists;
	arc_buf_hdr_t **markers;
	int i;

	IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);

	num_sublists = multilist_get_num_sublists(ml);

	/*
	 * If we've tried to evict from each sublist, made some
	 * progress, but still have not hit the target number of bytes
	 * to evict, we want to keep trying. The markers allow us to
	 * pick up where we left off for each individual sublist, rather
	 * than starting from the tail each time.
	 */
	markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
	for (i = 0; i < num_sublists; i++) {
		multilist_sublist_t *mls;

		markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);

		/*
		 * A b_spa of 0 is used to indicate that this header is
		 * a marker. This fact is used in arc_adjust_type() and
		 * arc_evict_state_impl().
		 */
		markers[i]->b_spa = 0;

		mls = multilist_sublist_lock(ml, i);
		multilist_sublist_insert_tail(mls, markers[i]);
		multilist_sublist_unlock(mls);
	}

	/*
	 * While we haven't hit our target number of bytes to evict, or
	 * we're evicting all available buffers.
	 */
	while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
		int sublist_idx = multilist_get_random_index(ml);
		uint64_t scan_evicted = 0;

		/*
		 * Try to reduce pinned dnodes with a floor of arc_dnode_limit.
		 * Request that 10% of the LRUs be scanned by the superblock
		 * shrinker.
		 */
		if (type == ARC_BUFC_DATA && arc_dnode_size > arc_dnode_limit)
			arc_prune_async((arc_dnode_size - arc_dnode_limit) /
			    sizeof (dnode_t) / zfs_arc_dnode_reduce_percent);

		/*
		 * Start eviction using a randomly selected sublist;
		 * this is to try and evenly balance eviction across all
		 * sublists. Always starting at the same sublist
		 * (e.g. index 0) would cause evictions to favor certain
		 * sublists over others.
		 */
		for (i = 0; i < num_sublists; i++) {
			uint64_t bytes_remaining;
			uint64_t bytes_evicted;

			if (bytes == ARC_EVICT_ALL)
				bytes_remaining = ARC_EVICT_ALL;
			else if (total_evicted < bytes)
				bytes_remaining = bytes - total_evicted;
			else
				break;

			bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
			    markers[sublist_idx], spa, bytes_remaining);

			scan_evicted += bytes_evicted;
			total_evicted += bytes_evicted;

			/* we've reached the end, wrap to the beginning */
			if (++sublist_idx >= num_sublists)
				sublist_idx = 0;
		}

		/*
		 * If we didn't evict anything during this scan, we have
		 * no reason to believe we'll evict more during another
		 * scan, so break the loop.
		 */
		if (scan_evicted == 0) {
			/* This isn't possible, let's make that obvious */
			ASSERT3S(bytes, !=, 0);

			/*
			 * When bytes is ARC_EVICT_ALL, the only way to
			 * break the loop is when scan_evicted is zero.
			 * In that case, we actually have evicted enough,
			 * so we don't want to increment the kstat.
			 */
			if (bytes != ARC_EVICT_ALL) {
				ASSERT3S(total_evicted, <, bytes);
				ARCSTAT_BUMP(arcstat_evict_not_enough);
			}

			break;
		}
	}

	for (i = 0; i < num_sublists; i++) {
		multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
		multilist_sublist_remove(mls, markers[i]);
		multilist_sublist_unlock(mls);

		kmem_cache_free(hdr_full_cache, markers[i]);
	}
	kmem_free(markers, sizeof (*markers) * num_sublists);

	return (total_evicted);
}
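
/*
 * Illustrative call (hypothetical values, not from the original
 * source): to trim up to 1 MiB of data buffers belonging to any pool
 * from the MRU list, a caller would do something like
 *
 *	(void) arc_evict_state(arc_mru, 0, 1024 * 1024, ARC_BUFC_DATA);
 *
 * A spa of 0 means "any pool", and the return value may be less than
 * requested if hash lock misses or unevictable buffers get in the way.
 */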

/*
 * Flush all "evictable" data of the given type from the arc state
 * specified. This will not evict any "active" buffers (i.e. referenced).
 *
 * When 'retry' is set to B_FALSE, the function will make a single pass
 * over the state and evict any buffers that it can. Since it doesn't
 * continually retry the eviction, it might end up leaving some buffers
 * in the ARC due to lock misses.
 *
 * When 'retry' is set to B_TRUE, the function will continually retry the
 * eviction until *all* evictable buffers have been removed from the
 * state. As a result, if concurrent insertions into the state are
 * allowed (e.g. if the ARC isn't shutting down), this function might
 * wind up in an infinite loop, continually trying to evict buffers.
 */
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
    boolean_t retry)
{
	uint64_t evicted = 0;

	while (refcount_count(&state->arcs_esize[type]) != 0) {
		evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);

		if (!retry)
			break;
	}

	return (evicted);
}
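
/*
 * For example (mirroring how arc_flush() later in this file drives
 * this function), a single best-effort pass over one pool's MRU data
 * buffers is
 *
 *	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, B_FALSE);
 *
 * retry == B_TRUE is only safe once new insertions have been fenced
 * off, since the loop does not otherwise terminate until arcs_esize
 * reaches zero.
 */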

/*
 * Helper function for arc_prune_async(); it is responsible for safely
 * handling the execution of a registered arc_prune_func_t.
 */
static void
arc_prune_task(void *ptr)
{
	arc_prune_t *ap = (arc_prune_t *)ptr;
	arc_prune_func_t *func = ap->p_pfunc;

	if (func != NULL)
		func(ap->p_adjust, ap->p_private);

	refcount_remove(&ap->p_refcnt, func);
}

/*
 * Notify registered consumers they must drop holds on a portion of the ARC
 * buffers they reference. This provides a mechanism to ensure the ARC can
 * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
 * is analogous to dnlc_reduce_cache() but more generic.
 *
 * This operation is performed asynchronously so it may be safely called
 * in the context of the arc_reclaim_thread(). A reference is taken here
 * for each registered arc_prune_t and the arc_prune_task() is responsible
 * for releasing it once the registered arc_prune_func_t has completed.
 */
static void
arc_prune_async(int64_t adjust)
{
	arc_prune_t *ap;

	mutex_enter(&arc_prune_mtx);
	for (ap = list_head(&arc_prune_list); ap != NULL;
	    ap = list_next(&arc_prune_list, ap)) {

		if (refcount_count(&ap->p_refcnt) >= 2)
			continue;

		refcount_add(&ap->p_refcnt, ap->p_pfunc);
		ap->p_adjust = adjust;
		if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
		    ap, TQ_SLEEP) == TASKQID_INVALID) {
			refcount_remove(&ap->p_refcnt, ap->p_pfunc);
			continue;
		}
		ARCSTAT_BUMP(arcstat_prune);
	}
	mutex_exit(&arc_prune_mtx);
}
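
/*
 * Sketch of a consumer (illustrative only; the callback body below is
 * hypothetical, while arc_add_prune_callback() and
 * arc_remove_prune_callback() are the registration entry points the
 * ARC provides):
 *
 *	static void
 *	my_prune_func(int64_t nr_to_scan, void *arg)
 *	{
 *		(drop up to nr_to_scan dentry/inode holds here)
 *	}
 *
 *	arc_prune_t *ap = arc_add_prune_callback(my_prune_func, NULL);
 *	...
 *	arc_remove_prune_callback(ap);
 *
 * Each invocation of arc_prune_async() then dispatches the registered
 * callback to arc_prune_taskq with the requested scan count.
 */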

/*
 * Evict the specified number of bytes from the state specified,
 * restricting eviction to the spa and type given. This function
 * prevents us from trying to evict more from a state's list than
 * is "evictable", and skips eviction altogether when passed a
 * negative value for "bytes". In contrast, arc_evict_state() will
 * evict everything it can, when passed a negative value for "bytes".
 */
static uint64_t
arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
    arc_buf_contents_t type)
{
	int64_t delta;

	if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
		delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
		return (arc_evict_state(state, spa, delta, type));
	}

	return (0);
}
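
/*
 * Worked example (hypothetical numbers): if a state holds 512K of
 * evictable metadata and the caller asks for 1M, delta clamps to 512K
 * and at most that much is evicted. If the caller computed a negative
 * target (i.e. the state is already below its goal), the function
 * simply returns 0 rather than treating it as ARC_EVICT_ALL.
 */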

/*
 * The goal of this function is to evict enough meta data buffers from the
 * ARC in order to enforce the arc_meta_limit. Achieving this is slightly
 * more complicated than it appears because it is common for data buffers
 * to have holds on meta data buffers. In addition, dnode meta data buffers
 * will be held by the dnodes in the block preventing them from being freed.
 * This means we can't simply traverse the ARC and expect to always find
 * enough unheld meta data buffers to release.
 *
 * Therefore, this function has been updated to make alternating passes
 * over the ARC releasing data buffers and then newly unheld meta data
 * buffers. This ensures forward progress is maintained and arc_meta_used
 * will decrease. Normally this is sufficient, but if required the ARC
 * will call the registered prune callbacks causing dentry and inodes to
 * be dropped from the VFS cache. This will make dnode meta data buffers
 * available for reclaim.
 */
static uint64_t
arc_adjust_meta_balanced(void)
{
	int64_t delta, prune = 0, adjustmnt;
	uint64_t total_evicted = 0;
	arc_buf_contents_t type = ARC_BUFC_DATA;
	int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);

restart:
	/*
	 * This differs slightly from the way we evict from the mru in
	 * arc_adjust because we don't have a "target" value (i.e. no
	 * "meta" arc_p). As a result, I think we can completely
	 * cannibalize the metadata in the MRU before we evict the
	 * metadata from the MFU. I think we probably need to implement a
	 * "metadata arc_p" value to do this properly.
	 */
	adjustmnt = arc_meta_used - arc_meta_limit;

	if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
		delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
		    adjustmnt);
		total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
		adjustmnt -= delta;
	}

	/*
	 * We can't afford to recalculate adjustmnt here. If we do,
	 * new metadata buffers can sneak into the MRU or ANON lists,
	 * thus penalizing the MFU metadata. Although the fudge factor is
	 * small, it has been empirically shown to be significant for
	 * certain workloads (e.g. creating many empty directories). As
	 * such, we use the original calculation for adjustmnt, and
	 * simply decrement the amount of data evicted from the MRU.
	 */

	if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
		delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
		    adjustmnt);
		total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
	}

	adjustmnt = arc_meta_used - arc_meta_limit;

	if (adjustmnt > 0 &&
	    refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
		delta = MIN(adjustmnt,
		    refcount_count(&arc_mru_ghost->arcs_esize[type]));
		total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
		adjustmnt -= delta;
	}

	if (adjustmnt > 0 &&
	    refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
		delta = MIN(adjustmnt,
		    refcount_count(&arc_mfu_ghost->arcs_esize[type]));
		total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
	}

	/*
	 * If after attempting to make the requested adjustment to the ARC
	 * the meta limit is still being exceeded then request that the
	 * higher layers drop some cached objects which have holds on ARC
	 * meta buffers. Requests to the upper layers will be made with
	 * increasingly large scan sizes until the ARC is below the limit.
	 */
	if (arc_meta_used > arc_meta_limit) {
		if (type == ARC_BUFC_DATA) {
			type = ARC_BUFC_METADATA;
		} else {
			type = ARC_BUFC_DATA;

			if (zfs_arc_meta_prune) {
				prune += zfs_arc_meta_prune;
				arc_prune_async(prune);
			}
		}

		if (restarts > 0) {
			restarts--;
			goto restart;
		}
	}
	return (total_evicted);
}
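
/*
 * Worked example of the alternating passes (hypothetical numbers):
 * suppose arc_meta_used exceeds arc_meta_limit by 300M. The first pass
 * runs with type == ARC_BUFC_DATA, releasing data buffers whose holds
 * pin metadata; if the limit is still exceeded, the next pass flips to
 * ARC_BUFC_METADATA and can evict the newly unheld buffers. Each time
 * a metadata pass still leaves us over the limit, 'prune' grows by
 * zfs_arc_meta_prune, so the VFS is asked to scan progressively more
 * dentries/inodes until arc_meta_used drops below the limit or the
 * restart budget is exhausted.
 */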

/*
 * Evict metadata buffers from the cache, such that arc_meta_used is
 * capped by the arc_meta_limit tunable.
 */
static uint64_t
arc_adjust_meta_only(void)
{
	uint64_t total_evicted = 0;
	int64_t target;

	/*
	 * If we're over the meta limit, we want to evict enough
	 * metadata to get back under the meta limit. We don't want to
	 * evict so much that we drop the MRU below arc_p, though. If
	 * we're over the meta limit more than we're over arc_p, we
	 * evict some from the MRU here, and some from the MFU below.
	 */
	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) - arc_p));

	total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);

	/*
	 * Similar to the above, we want to evict enough bytes to get us
	 * below the meta limit, but not so much as to drop us below the
	 * space allotted to the MFU (which is defined as arc_c - arc_p).
	 */
	target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
	    (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));

	total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
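
/*
 * Worked example (hypothetical numbers): with arc_meta_used 400M over
 * arc_meta_limit and anon + MRU sitting 150M above arc_p, the first
 * target is MIN(400M, 150M) = 150M of MRU metadata. The second target
 * is then recomputed from the updated arc_meta_used and additionally
 * bounded by how far the MFU exceeds its allotment (arc_c - arc_p),
 * so neither list is pushed below its share of the cache.
 */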

static uint64_t
arc_adjust_meta(void)
{
	if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
		return (arc_adjust_meta_only());
	else
		return (arc_adjust_meta_balanced());
}

/*
 * Return the type of the oldest buffer in the given arc state
 *
 * This function will select a random sublist of type ARC_BUFC_DATA and
 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
 * is compared, and the type which contains the "older" buffer will be
 * returned.
 */
static arc_buf_contents_t
arc_adjust_type(arc_state_t *state)
{
	multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
	multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
	int data_idx = multilist_get_random_index(data_ml);
	int meta_idx = multilist_get_random_index(meta_ml);
	multilist_sublist_t *data_mls;
	multilist_sublist_t *meta_mls;
	arc_buf_contents_t type;
	arc_buf_hdr_t *data_hdr;
	arc_buf_hdr_t *meta_hdr;

	/*
	 * We keep the sublist lock until we're finished, to prevent
	 * the headers from being destroyed via arc_evict_state().
	 */
	data_mls = multilist_sublist_lock(data_ml, data_idx);
	meta_mls = multilist_sublist_lock(meta_ml, meta_idx);

	/*
	 * These two loops are to ensure we skip any markers that
	 * might be at the tail of the lists due to arc_evict_state().
	 */

	for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
	    data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
		if (data_hdr->b_spa != 0)
			break;
	}

	for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
	    meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
		if (meta_hdr->b_spa != 0)
			break;
	}

	if (data_hdr == NULL && meta_hdr == NULL) {
		type = ARC_BUFC_DATA;
	} else if (data_hdr == NULL) {
		ASSERT3P(meta_hdr, !=, NULL);
		type = ARC_BUFC_METADATA;
	} else if (meta_hdr == NULL) {
		ASSERT3P(data_hdr, !=, NULL);
		type = ARC_BUFC_DATA;
	} else {
		ASSERT3P(data_hdr, !=, NULL);
		ASSERT3P(meta_hdr, !=, NULL);

		/* The headers can't be on the sublist without an L1 header */
		ASSERT(HDR_HAS_L1HDR(data_hdr));
		ASSERT(HDR_HAS_L1HDR(meta_hdr));

		if (data_hdr->b_l1hdr.b_arc_access <
		    meta_hdr->b_l1hdr.b_arc_access) {
			type = ARC_BUFC_DATA;
		} else {
			type = ARC_BUFC_METADATA;
		}
	}

	multilist_sublist_unlock(meta_mls);
	multilist_sublist_unlock(data_mls);

	return (type);
}

/*
 * Evict buffers from the cache, such that arc_size is capped by arc_c.
 */
static uint64_t
arc_adjust(void)
{
	uint64_t total_evicted = 0;
	uint64_t bytes;
	int64_t target;

	/*
	 * If we're over arc_meta_limit, we want to correct that before
	 * potentially evicting data buffers below.
	 */
	total_evicted += arc_adjust_meta();

	/*
	 * Adjust MRU size
	 *
	 * If we're over the target cache size, we want to evict enough
	 * from the list to get back to our target size. We don't want
	 * to evict too much from the MRU, such that it drops below
	 * arc_p. So, if we're over our target cache size more than
	 * the MRU is over arc_p, we'll evict enough to get back to
	 * arc_p here, and then evict more from the MFU below.
	 */
	target = MIN((int64_t)(arc_size - arc_c),
	    (int64_t)(refcount_count(&arc_anon->arcs_size) +
	    refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));

	/*
	 * If we're below arc_meta_min, always prefer to evict data.
	 * Otherwise, try to satisfy the requested number of bytes to
	 * evict from the type which contains older buffers; in an
	 * effort to keep newer buffers in the cache regardless of their
	 * type. If we cannot satisfy the number of bytes from this
	 * type, spill over into the next type.
	 */
	if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
	    arc_meta_used > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust MFU size
	 *
	 * Now that we've tried to evict enough from the MRU to get its
	 * size back to arc_p, if we're still above the target cache
	 * size, we evict the rest from the MFU.
	 */
	target = arc_size - arc_c;

	if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
	    arc_meta_used > arc_meta_min) {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * metadata, we try to get the rest from data.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
	} else {
		bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
		total_evicted += bytes;

		/*
		 * If we couldn't evict our target number of bytes from
		 * data, we try to get the rest from metadata.
		 */
		target -= bytes;

		total_evicted +=
		    arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
	}

	/*
	 * Adjust ghost lists
	 *
	 * In addition to the above, the ARC also defines target values
	 * for the ghost lists. The sum of the mru list and mru ghost
	 * list should never exceed the target size of the cache, and
	 * the sum of the mru list, mfu list, mru ghost list, and mfu
	 * ghost list should never exceed twice the target size of the
	 * cache. The following logic enforces these limits on the ghost
	 * caches, and evicts from them as needed.
	 */
	target = refcount_count(&arc_mru->arcs_size) +
	    refcount_count(&arc_mru_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);

	/*
	 * We assume the sum of the mru list and mfu list is less than
	 * or equal to arc_c (we enforced this above), which means we
	 * can use the simpler of the two equations below:
	 *
	 *	mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
	 *		    mru ghost + mfu ghost <= arc_c
	 */
	target = refcount_count(&arc_mru_ghost->arcs_size) +
	    refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;

	bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
	total_evicted += bytes;

	target -= bytes;

	total_evicted +=
	    arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);

	return (total_evicted);
}
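
/*
 * Worked example of the ghost list targets (hypothetical numbers):
 * with arc_c = 8G, an MRU of 3G, and an MRU ghost of 6G, the first
 * ghost target is 3G + 6G - 8G = 1G, so up to 1G is trimmed from the
 * MRU ghost list (data first, then metadata). The second target then
 * bounds mru ghost + mfu ghost by arc_c, which by the invariant noted
 * above keeps all four lists combined within 2 * arc_c.
 */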

void
arc_flush(spa_t *spa, boolean_t retry)
{
	uint64_t guid = 0;

	/*
	 * If retry is B_TRUE, a spa must not be specified since we have
	 * no good way to determine if all of a spa's buffers have been
	 * evicted from an arc state.
	 */
	ASSERT(!retry || spa == 0);

	if (spa != NULL)
		guid = spa_load_guid(spa);

	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);

	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
	(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}

void
arc_shrink(int64_t to_free)
{
	uint64_t c = arc_c;

	if (c > to_free && c - to_free > arc_c_min) {
		arc_c = c - to_free;
		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	} else {
		arc_c = arc_c_min;
	}

	if (arc_size > arc_c)
		(void) arc_adjust();
}
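
/*
 * Worked example (hypothetical numbers): with arc_c = 8G, arc_c_min =
 * 2G, and to_free = 1G, the target drops to 7G and arc_p shrinks by
 * arc_p >> arc_shrink_shift. If instead to_free were 7G, the else
 * branch would clamp arc_c straight to arc_c_min. Either way, actual
 * eviction only happens in the final arc_adjust() call, and only if
 * arc_size now exceeds the reduced arc_c.
 */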

/*
 * Return the maximum amount of memory that we could possibly use. In
 * user space, which is primarily used for testing, this is reduced to
 * half of all memory.
 */
static uint64_t
arc_all_memory(void)
{
#ifdef _KERNEL
#ifdef CONFIG_HIGHMEM
	return (ptob(totalram_pages - totalhigh_pages));
#else
	return (ptob(totalram_pages));
#endif /* CONFIG_HIGHMEM */
#else
	return (ptob(physmem) / 2);
#endif /* _KERNEL */
}

/*
 * Return the amount of memory that is considered free. In user space,
 * which is primarily used for testing, we pretend that free memory
 * ranges from 0-20% of all memory.
 */
static uint64_t
arc_free_memory(void)
{
#ifdef _KERNEL
#ifdef CONFIG_HIGHMEM
	struct sysinfo si;
	si_meminfo(&si);
	return (ptob(si.freeram - si.freehigh));
#else
#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
	return (ptob(nr_free_pages() +
	    global_node_page_state(NR_INACTIVE_FILE) +
	    global_node_page_state(NR_INACTIVE_ANON) +
	    global_node_page_state(NR_SLAB_RECLAIMABLE)));
#else
	return (ptob(nr_free_pages() +
	    global_page_state(NR_INACTIVE_FILE) +
	    global_page_state(NR_INACTIVE_ANON) +
	    global_page_state(NR_SLAB_RECLAIMABLE)));
#endif /* ZFS_GLOBAL_NODE_PAGE_STATE */
#endif /* CONFIG_HIGHMEM */
#else
	return (spa_get_random(arc_all_memory() * 20 / 100));
#endif /* _KERNEL */
}

typedef enum free_memory_reason_t {
	FMR_UNKNOWN,
	FMR_NEEDFREE,
	FMR_LOTSFREE,
	FMR_SWAPFS_MINFREE,
	FMR_PAGES_PP_MAXIMUM,
	FMR_HEAP_ARENA,
	FMR_ZIO_ARENA,
} free_memory_reason_t;

int64_t last_free_memory;
free_memory_reason_t last_free_reason;

#ifdef _KERNEL
/*
 * Additional reserve of pages for pp_reserve.
 */
int64_t arc_pages_pp_reserve = 64;

/*
 * Additional reserve of pages for swapfs.
 */
int64_t arc_swapfs_reserve = 64;
#endif /* _KERNEL */

/*
 * Return the amount of memory that can be consumed before reclaim will be
 * needed. Positive if there is sufficient free memory, negative indicates
 * the amount of memory that needs to be freed up.
 */
static int64_t
arc_available_memory(void)
{
	int64_t lowest = INT64_MAX;
	free_memory_reason_t r = FMR_UNKNOWN;
#ifdef _KERNEL
	int64_t n;
#ifdef __linux__
#ifdef freemem
#undef freemem
#endif
	pgcnt_t needfree = btop(arc_need_free);
	pgcnt_t lotsfree = btop(arc_sys_free);
	pgcnt_t desfree = 0;
	pgcnt_t freemem = btop(arc_free_memory());
#endif

	if (needfree > 0) {
		n = PAGESIZE * (-needfree);
		if (n < lowest) {
			lowest = n;
			r = FMR_NEEDFREE;
		}
	}

	/*
	 * Check that we're out of range of the pageout scanner. It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages. We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
	if (n < lowest) {
		lowest = n;
		r = FMR_LOTSFREE;
	}

#ifndef __linux__
	/*
	 * Check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed. anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages. We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
	    desfree - arc_swapfs_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_SWAPFS_MINFREE;
	}

	/*
	 * Check that we have enough availrmem that memory locking (e.g., via
	 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
	 * stores the number of pages that cannot be locked; when availrmem
	 * drops below pages_pp_maximum, page locking mechanisms such as
	 * page_pp_lock() will fail.)
	 */
	n = PAGESIZE * (availrmem - pages_pp_maximum -
	    arc_pages_pp_reserve);
	if (n < lowest) {
		lowest = n;
		r = FMR_PAGES_PP_MAXIMUM;
	}
#endif

#if defined(_ILP32)
	/*
	 * If we're on a 32-bit platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory. Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system. However, this is generally fixed at 25 pages
	 * which is so low that it's useless. In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated. (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	n = vmem_size(heap_arena, VMEM_FREE) -
	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
	if (n < lowest) {
		lowest = n;
		r = FMR_HEAP_ARENA;
	}
#endif

	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this arena remains
	 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
	 *
	 * Note that reducing the arc_zio_arena_free_shift keeps more virtual
	 * memory (in the zio_arena) free, which can avoid memory
	 * fragmentation issues.
	 */
	if (zio_arena != NULL) {
		n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
		    (vmem_size(zio_arena, VMEM_ALLOC) >>
		    arc_zio_arena_free_shift);
		if (n < lowest) {
			lowest = n;
			r = FMR_ZIO_ARENA;
		}
	}
#else /* _KERNEL */
	/* Every 100 calls, free a small amount */
	if (spa_get_random(100) == 0)
		lowest = -1024;
#endif /* _KERNEL */

	last_free_memory = lowest;
	last_free_reason = r;

	return (lowest);
}
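
/*
 * Worked example (hypothetical numbers, Linux case): with 4K pages,
 * freemem = 100000 pages, lotsfree = 16000, and needfree = desfree = 0,
 * the FMR_LOTSFREE term is PAGESIZE * 84000 (roughly 328M of headroom).
 * If the shrinker later raises arc_need_free so that needfree exceeds
 * freemem - lotsfree, that term goes negative and becomes the 'lowest'
 * value returned, driving arc_reclaim_needed() below to B_TRUE.
 */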

/*
 * Determine if the system is under memory pressure and is asking
 * to reclaim memory. A return value of B_TRUE indicates that the system
 * is under memory pressure and that the arc should adjust accordingly.
 */
static boolean_t
arc_reclaim_needed(void)
{
	return (arc_available_memory() < 0);
}

static void
arc_kmem_reap_now(void)
{
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];
	extern kmem_cache_t *range_seg_cache;

#ifdef _KERNEL
	if ((arc_meta_used >= arc_meta_limit) && zfs_arc_meta_prune) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Prune some entries to release holds on meta-data.
		 */
		arc_prune_async(zfs_arc_meta_prune);
	}
#if defined(_ILP32)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
		/* reach upper limit of cache size on 32-bit */
		if (zio_buf_cache[i] == NULL)
			break;
#endif
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_full_cache);
	kmem_cache_reap_now(hdr_l2only_cache);
	kmem_cache_reap_now(range_seg_cache);

	if (zio_arena != NULL) {
		/*
		 * Ask the vmem arena to reclaim unused memory from its
		 * quantum caches.
		 */
		vmem_qcache_reap(zio_arena);
	}
}

/*
 * Threads can block in arc_get_data_impl() waiting for this thread to evict
 * enough data and signal them to proceed. When this happens, the threads in
 * arc_get_data_impl() are sleeping while holding the hash lock for their
 * particular arc header. Thus, we must be careful to never sleep on a
 * hash lock in this thread. This is to prevent the following deadlock:
 *
 * - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
 *   waiting for the reclaim thread to signal it.
 *
 * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
 *   fails, and goes to sleep forever.
 *
 * This possible deadlock is avoided by always acquiring a hash lock
 * using mutex_tryenter() from arc_reclaim_thread().
 */
static void
arc_reclaim_thread(void *unused)
{
	fstrans_cookie_t cookie = spl_fstrans_mark();
	hrtime_t growtime = 0;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_lock);
	while (!arc_reclaim_thread_exit) {
		int64_t to_free;
		uint64_t evicted = 0;
		uint64_t need_free = arc_need_free;
		arc_tuning_update();

		/*
		 * This is necessary in order for the mdb ::arc dcmd to
		 * show up to date information. Since the ::arc command
		 * does not call the kstat's update function, without
		 * this call, the command may show stale stats for the
		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
		 * with this change, the data might be up to 1 second
		 * out of date; but that should suffice. The arc_state_t
		 * structures can be queried directly if more accurate
		 * information is needed.
		 */
#ifndef __linux__
		if (arc_ksp != NULL)
			arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
		mutex_exit(&arc_reclaim_lock);

		/*
		 * We call arc_adjust() before (possibly) calling
		 * arc_kmem_reap_now(), so that we can wake up
		 * arc_get_data_buf() sooner.
		 */
		evicted = arc_adjust();

		int64_t free_memory = arc_available_memory();
		if (free_memory < 0) {

			arc_no_grow = B_TRUE;
			arc_warm = B_TRUE;

			/*
			 * Wait at least zfs_grow_retry (default 5) seconds
			 * before considering growing.
			 */
			growtime = gethrtime() + SEC2NSEC(arc_grow_retry);

			arc_kmem_reap_now();

			/*
			 * If we are still low on memory, shrink the ARC
			 * so that we have arc_shrink_min free space.
			 */
			free_memory = arc_available_memory();

			to_free = (arc_c >> arc_shrink_shift) - free_memory;
			if (to_free > 0) {
#ifdef _KERNEL
				to_free = MAX(to_free, need_free);
#endif
				arc_shrink(to_free);
			}
		} else if (free_memory < arc_c >> arc_no_grow_shift) {
			arc_no_grow = B_TRUE;
		} else if (gethrtime() >= growtime) {
			arc_no_grow = B_FALSE;
		}

		mutex_enter(&arc_reclaim_lock);

		/*
		 * If evicted is zero, we couldn't evict anything via
		 * arc_adjust(). This could be due to hash lock
		 * collisions, but more likely due to the majority of
		 * arc buffers being unevictable. Therefore, even if
		 * arc_size is above arc_c, another pass is unlikely to
		 * be helpful and could potentially cause us to enter an
		 * infinite loop.
		 */
		if (arc_size <= arc_c || evicted == 0) {
			/*
			 * We're either no longer overflowing, or we
			 * can't evict anything more, so we should wake
			 * up any threads before we go to sleep and remove
			 * the bytes we were working on from arc_need_free
			 * since nothing more will be done here.
			 */
			cv_broadcast(&arc_reclaim_waiters_cv);
			ARCSTAT_INCR(arcstat_need_free, -need_free);

			/*
			 * Block until signalled, or after one second (we
			 * might need to perform arc_kmem_reap_now()
			 * even if we aren't being signalled)
			 */
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_sig_hires(&arc_reclaim_thread_cv,
			    &arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
		}
	}

	arc_reclaim_thread_exit = B_FALSE;
	cv_broadcast(&arc_reclaim_thread_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops arc_reclaim_lock */
	spl_fstrans_unmark(cookie);
	thread_exit();
}
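
/*
 * Timeline sketch of the wake-up protocol (illustrative; the exact
 * allocator-side steps live in arc_get_data_impl()):
 *
 * 1. An allocating thread observes arc_is_overflowing(), signals
 *    arc_reclaim_thread_cv, and sleeps on arc_reclaim_waiters_cv.
 * 2. This thread wakes, drops arc_reclaim_lock, and runs arc_adjust();
 *    each successful per-header eviction cv_signal()s one waiter.
 * 3. Once arc_size <= arc_c (or nothing more can be evicted), the
 *    cv_broadcast() above releases any remaining waiters and the
 *    thread naps for up to one second.
 */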

#ifdef _KERNEL
/*
 * Determine the amount of memory eligible for eviction contained in the
 * ARC. All clean data reported by the ghost lists can always be safely
 * evicted. Due to arc_c_min, the same does not hold for all clean data
 * contained by the regular mru and mfu lists.
 *
 * In the case of the regular mru and mfu lists, we need to report as
 * much clean data as possible, such that evicting that same reported
 * data will not bring arc_size below arc_c_min. Thus, in certain
 * circumstances, the total amount of clean data in the mru and mfu
 * lists might not actually be evictable.
 *
 * The following two distinct cases are accounted for:
 *
 * 1. The sum of the amount of dirty data contained by both the mru and
 *    mfu lists, plus the ARC's other accounting (e.g. the anon list),
 *    is greater than or equal to arc_c_min.
 *    (i.e. amount of dirty data >= arc_c_min)
 *
 *    This is the easy case; all clean data contained by the mru and mfu
 *    lists is evictable. Evicting all clean data can only drop arc_size
 *    to the amount of dirty data, which is greater than arc_c_min.
 *
 * 2. The sum of the amount of dirty data contained by both the mru and
 *    mfu lists, plus the ARC's other accounting (e.g. the anon list),
 *    is less than arc_c_min.
 *    (i.e. arc_c_min > amount of dirty data)
 *
 *    2.1. arc_size is greater than or equal to arc_c_min.
 *	   (i.e. arc_size >= arc_c_min > amount of dirty data)
 *
 *	   In this case, not all clean data from the regular mru and mfu
 *	   lists is actually evictable; we must leave enough clean data
 *	   to keep arc_size above arc_c_min. Thus, the maximum amount of
 *	   evictable data from the two lists combined, is exactly the
 *	   difference between arc_size and arc_c_min.
 *
 *    2.2. arc_size is less than arc_c_min
 *	   (i.e. arc_c_min > arc_size > amount of dirty data)
 *
 *	   In this case, none of the data contained in the mru and mfu
 *	   lists is evictable, even if it's clean. Since arc_size is
 *	   already below arc_c_min, evicting any more would only
 *	   increase this negative difference.
 */
static uint64_t
arc_evictable_memory(void)
{
	uint64_t arc_clean =
	    refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
	    refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
	    refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
	    refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);

	/*
	 * Scale reported evictable memory in proportion to page cache, cap
	 * at specified min/max.
	 */
#ifdef ZFS_GLOBAL_NODE_PAGE_STATE
	uint64_t min = (ptob(global_node_page_state(NR_FILE_PAGES)) / 100) *
	    zfs_arc_pc_percent;
#else
	uint64_t min = (ptob(global_page_state(NR_FILE_PAGES)) / 100) *
	    zfs_arc_pc_percent;
#endif
	min = MAX(arc_c_min, MIN(arc_c_max, min));

	if (arc_dirty >= min)
		return (arc_clean);

	return (MAX((int64_t)arc_size - (int64_t)min, 0));
}
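
/*
 * Worked example of case 2.1 (hypothetical numbers): with arc_c_min =
 * 2G, arc_size = 4G, and arc_clean = 3G, arc_dirty is 1G, i.e. less
 * than arc_c_min, so reporting all 3G as evictable would let the
 * shrinker pull arc_size below arc_c_min. The cap above instead
 * reports at most arc_size - min; with the page-cache scaled floor
 * clamped to arc_c_min, that is 4G - 2G = 2G of evictable memory.
 */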

/*
 * If sc->nr_to_scan is zero, the caller is requesting a query of the
 * number of objects which can potentially be freed. If it is nonzero,
 * the request is to free that many objects.
 *
 * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
 * in struct shrinker and also require the shrinker to return the number
 * of objects freed.
 *
 * Older kernels require the shrinker to return the number of freeable
 * objects following the freeing of nr_to_free.
 */
static spl_shrinker_t
__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
	int64_t pages;

	/* The arc is considered warm once reclaim has occurred */
	if (unlikely(arc_warm == B_FALSE))
		arc_warm = B_TRUE;

	/* Return the potential number of reclaimable pages */
	pages = btop((int64_t)arc_evictable_memory());
	if (sc->nr_to_scan == 0)
		return (pages);

	/* Not allowed to perform filesystem reclaim */
	if (!(sc->gfp_mask & __GFP_FS))
		return (SHRINK_STOP);

	/* Reclaim in progress */
	if (mutex_tryenter(&arc_reclaim_lock) == 0) {
		ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
		return (0);
	}

	mutex_exit(&arc_reclaim_lock);

	/*
	 * Evict the requested number of pages by shrinking arc_c by the
	 * requested amount.
	 */
	if (pages > 0) {
		arc_shrink(ptob(sc->nr_to_scan));
		if (current_is_kswapd())
			arc_kmem_reap_now();
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
		pages = MAX((int64_t)pages -
		    (int64_t)btop(arc_evictable_memory()), 0);
#else
		pages = btop(arc_evictable_memory());
#endif
		/*
		 * We've shrunk what we can, wake up threads.
		 */
		cv_broadcast(&arc_reclaim_waiters_cv);
	} else
		pages = SHRINK_STOP;

	/*
	 * When direct reclaim is observed it usually indicates a rapid
	 * increase in memory pressure. This occurs because the kswapd
	 * threads were unable to asynchronously keep enough free memory
	 * available. In this case set arc_no_grow to briefly pause arc
	 * growth to avoid compounding the memory pressure.
	 */
	if (current_is_kswapd()) {
		ARCSTAT_BUMP(arcstat_memory_indirect_count);
	} else {
		arc_no_grow = B_TRUE;
		arc_kmem_reap_now();
		ARCSTAT_BUMP(arcstat_memory_direct_count);
	}

	return (pages);
}
SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);

SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
#endif /* _KERNEL */
5206 | #endif /* _KERNEL */ | |
5207 | ||
34dc7c2f BB |
5208 | /* |
5209 | * Adapt arc info given the number of bytes we are trying to add and | |
4e33ba4c | 5210 | * the state that we are coming from. This function is only called |
34dc7c2f BB |
5211 | * when we are adding new content to the cache. |
5212 | */ | |
5213 | static void | |
5214 | arc_adapt(int bytes, arc_state_t *state) | |
5215 | { | |
5216 | int mult; | |
728d6ae9 | 5217 | uint64_t arc_p_min = (arc_c >> arc_p_min_shift); |
36da08ef PS |
5218 | int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); |
5219 | int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); | |
34dc7c2f BB |
5220 | |
5221 | if (state == arc_l2c_only) | |
5222 | return; | |
5223 | ||
5224 | ASSERT(bytes > 0); | |
5225 | /* | |
5226 | * Adapt the target size of the MRU list: | |
5227 | * - if we just hit in the MRU ghost list, then increase | |
5228 | * the target size of the MRU list. | |
5229 | * - if we just hit in the MFU ghost list, then increase | |
5230 | * the target size of the MFU list by decreasing the | |
5231 | * target size of the MRU list. | |
5232 | */ | |
5233 | if (state == arc_mru_ghost) { | |
36da08ef | 5234 | mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); |
62422785 PS |
5235 | if (!zfs_arc_p_dampener_disable) |
5236 | mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ | |
34dc7c2f | 5237 | |
728d6ae9 | 5238 | arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); |
34dc7c2f | 5239 | } else if (state == arc_mfu_ghost) { |
d164b209 BB |
5240 | uint64_t delta; |
5241 | ||
36da08ef | 5242 | mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size); |
62422785 PS |
5243 | if (!zfs_arc_p_dampener_disable) |
5244 | mult = MIN(mult, 10); | |
34dc7c2f | 5245 | |
d164b209 | 5246 | delta = MIN(bytes * mult, arc_p); |
728d6ae9 | 5247 | arc_p = MAX(arc_p_min, arc_p - delta); |
34dc7c2f BB |
5248 | } |
5249 | ASSERT((int64_t)arc_p >= 0); | |
5250 | ||
ca67b33a MA |
5251 | if (arc_reclaim_needed()) { |
5252 | cv_signal(&arc_reclaim_thread_cv); | |
5253 | return; | |
5254 | } | |
5255 | ||
34dc7c2f BB |
5256 | if (arc_no_grow) |
5257 | return; | |
5258 | ||
5259 | if (arc_c >= arc_c_max) | |
5260 | return; | |
5261 | ||
5262 | /* | |
5263 | * If we're within (2 * maxblocksize) bytes of the target | |
5264 | * cache size, increment the target cache size | |
5265 | */ | |
935434ef | 5266 | ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT); |
121b3cae | 5267 | if (arc_size >= arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { |
34dc7c2f BB |
5268 | atomic_add_64(&arc_c, (int64_t)bytes); |
5269 | if (arc_c > arc_c_max) | |
5270 | arc_c = arc_c_max; | |
5271 | else if (state == arc_anon) | |
5272 | atomic_add_64(&arc_p, (int64_t)bytes); | |
5273 | if (arc_p > arc_c) | |
5274 | arc_p = arc_c; | |
5275 | } | |
5276 | ASSERT((int64_t)arc_p >= 0); | |
5277 | } | |
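/*
 * Worked example of the adaptation above (illustrative numbers only):
 * with arc_c = 1 GiB, arc_p = 512 MiB, and a 128 KiB hit in the MRU
 * ghost list while the MFU ghost list holds four times as much data
 * as the MRU ghost list, mult = 4 and arc_p grows by 512 KiB (clamped
 * above at arc_c - arc_p_min). A hit in the MFU ghost list shrinks
 * arc_p by the mirror-image rule, clamped below at arc_p_min. The
 * dampener caps mult at 10 unless zfs_arc_p_dampener_disable is set.
 */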
5278 | ||
5279 | /* | |
ca0bf58d PS |
5280 | * Check if arc_size has grown past our upper threshold, determined by |
5281 | * zfs_arc_overflow_shift. | |
34dc7c2f | 5282 | */ |
ca0bf58d PS |
5283 | static boolean_t |
5284 | arc_is_overflowing(void) | |
34dc7c2f | 5285 | { |
ca0bf58d PS |
5286 | /* Always allow at least one block of overflow */ |
5287 | uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, | |
5288 | arc_c >> zfs_arc_overflow_shift); | |
34dc7c2f | 5289 | |
ca0bf58d | 5290 | return (arc_size >= arc_c + overflow); |
34dc7c2f BB |
5291 | } |
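/*
 * Worked example: assuming the default zfs_arc_overflow_shift of 8
 * and arc_c = 4 GiB, the allowed overflow is
 * MAX(SPA_MAXBLOCKSIZE, 4 GiB >> 8) = MAX(16 MiB, 16 MiB) = 16 MiB,
 * so the ARC is only considered to be overflowing once arc_size
 * reaches arc_c + 16 MiB.
 */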
5292 | ||
a6255b7f DQ |
5293 | static abd_t * |
5294 | arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag) | |
5295 | { | |
5296 | arc_buf_contents_t type = arc_buf_type(hdr); | |
5297 | ||
5298 | arc_get_data_impl(hdr, size, tag); | |
5299 | if (type == ARC_BUFC_METADATA) { | |
5300 | return (abd_alloc(size, B_TRUE)); | |
5301 | } else { | |
5302 | ASSERT(type == ARC_BUFC_DATA); | |
5303 | return (abd_alloc(size, B_FALSE)); | |
5304 | } | |
5305 | } | |
5306 | ||
5307 | static void * | |
5308 | arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag) | |
5309 | { | |
5310 | arc_buf_contents_t type = arc_buf_type(hdr); | |
5311 | ||
5312 | arc_get_data_impl(hdr, size, tag); | |
5313 | if (type == ARC_BUFC_METADATA) { | |
5314 | return (zio_buf_alloc(size)); | |
5315 | } else { | |
5316 | ASSERT(type == ARC_BUFC_DATA); | |
5317 | return (zio_data_buf_alloc(size)); | |
5318 | } | |
5319 | } | |
5320 | ||
34dc7c2f | 5321 | /* |
d3c2ae1c GW |
5322 | * Allocate a block and return it to the caller. If we are hitting the |
5323 | * hard limit for the cache size, we must sleep, waiting for the eviction | |
5324 | * thread to catch up. If we're past the target size but below the hard | |
5325 | * limit, we'll only signal the reclaim thread and continue on. | |
34dc7c2f | 5326 | */ |
a6255b7f DQ |
5327 | static void |
5328 | arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) | |
34dc7c2f | 5329 | { |
a6255b7f DQ |
5330 | arc_state_t *state = hdr->b_l1hdr.b_state; |
5331 | arc_buf_contents_t type = arc_buf_type(hdr); | |
34dc7c2f BB |
5332 | |
5333 | arc_adapt(size, state); | |
5334 | ||
5335 | /* | |
ca0bf58d PS |
5336 | * If arc_size is currently overflowing, and has grown past our |
5337 | * upper limit, we must be adding data faster than the evict | |
5338 | * thread can evict. Thus, to ensure we don't compound the | |
5339 | * problem by adding more data and forcing arc_size to grow even | |
5340 | * further past its target size, we halt and wait for the
5341 | * eviction thread to catch up. | |
5342 | * | |
5343 | * It's also possible that the reclaim thread is unable to evict | |
5344 | * enough buffers to get arc_size below the overflow limit (e.g. | |
5345 | * due to buffers being un-evictable, or hash lock collisions). | |
5346 | * In this case, we want to proceed regardless of whether we're
5347 | * overflowing; thus we don't use a while loop here. | |
34dc7c2f | 5348 | */ |
ca0bf58d PS |
5349 | if (arc_is_overflowing()) { |
5350 | mutex_enter(&arc_reclaim_lock); | |
5351 | ||
5352 | /* | |
5353 | * Now that we've acquired the lock, we may no longer be | |
5354 | * over the overflow limit, so let's check.
5355 | * | |
5356 | * We're ignoring the case of spurious wake ups. If that | |
5357 | * were to happen, it'd let this thread consume an ARC | |
5358 | * buffer before it should have (i.e. before we're under | |
5359 | * the overflow limit and were signalled by the reclaim | |
5360 | * thread). As long as that is a rare occurrence, it | |
5361 | * shouldn't cause any harm. | |
5362 | */ | |
5363 | if (arc_is_overflowing()) { | |
5364 | cv_signal(&arc_reclaim_thread_cv); | |
5365 | cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); | |
34dc7c2f | 5366 | } |
34dc7c2f | 5367 | |
ca0bf58d | 5368 | mutex_exit(&arc_reclaim_lock); |
34dc7c2f | 5369 | } |
ab26409d | 5370 | |
d3c2ae1c | 5371 | VERIFY3U(hdr->b_type, ==, type); |
da8ccd0e | 5372 | if (type == ARC_BUFC_METADATA) { |
ca0bf58d PS |
5373 | arc_space_consume(size, ARC_SPACE_META); |
5374 | } else { | |
ca0bf58d | 5375 | arc_space_consume(size, ARC_SPACE_DATA); |
da8ccd0e PS |
5376 | } |
5377 | ||
34dc7c2f BB |
5378 | /* |
5379 | * Update the state size. Note that ghost states have a | |
5380 | * "ghost size" and so don't need to be updated. | |
5381 | */ | |
d3c2ae1c | 5382 | if (!GHOST_STATE(state)) { |
34dc7c2f | 5383 | |
d3c2ae1c | 5384 | (void) refcount_add_many(&state->arcs_size, size, tag); |
ca0bf58d PS |
5385 | |
5386 | /* | |
5387 | * If this is reached via arc_read, the link is | |
5388 | * protected by the hash lock. If reached via | |
5389 | * arc_buf_alloc, the header should not be accessed by | |
5390 | * any other thread. And, if reached via arc_read_done, | |
5391 | * the hash lock will protect it if it's found in the | |
5392 | * hash table; otherwise no other thread should be | |
5393 | * trying to [add|remove]_reference it. | |
5394 | */ | |
5395 | if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { | |
b9541d6b | 5396 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
d3c2ae1c GW |
5397 | (void) refcount_add_many(&state->arcs_esize[type], |
5398 | size, tag); | |
34dc7c2f | 5399 | } |
d3c2ae1c | 5400 | |
34dc7c2f BB |
5401 | /* |
5402 | * If we are growing the cache, and we are adding anonymous | |
5403 | * data, and we have outgrown arc_p, update arc_p | |
5404 | */ | |
ca0bf58d | 5405 | if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon && |
36da08ef PS |
5406 | (refcount_count(&arc_anon->arcs_size) + |
5407 | refcount_count(&arc_mru->arcs_size) > arc_p)) | |
34dc7c2f BB |
5408 | arc_p = MIN(arc_c, arc_p + size); |
5409 | } | |
a6255b7f DQ |
5410 | } |
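/*
 * The overflow check above is the classic condition-variable throttle
 * with one twist: a conventional consumer would loop, e.g.
 *
 *	mutex_enter(&arc_reclaim_lock);
 *	while (arc_is_overflowing())
 *		cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
 *	mutex_exit(&arc_reclaim_lock);
 *
 * whereas arc_get_data_impl() deliberately re-checks only once, so an
 * eviction thread that cannot make progress (un-evictable buffers,
 * hash lock collisions) can never stall allocations indefinitely.
 */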
5411 | ||
5412 | static void | |
5413 | arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag) | |
5414 | { | |
5415 | arc_free_data_impl(hdr, size, tag); | |
5416 | abd_free(abd); | |
5417 | } | |
5418 | ||
5419 | static void | |
5420 | arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag) | |
5421 | { | |
5422 | arc_buf_contents_t type = arc_buf_type(hdr); | |
5423 | ||
5424 | arc_free_data_impl(hdr, size, tag); | |
5425 | if (type == ARC_BUFC_METADATA) { | |
5426 | zio_buf_free(buf, size); | |
5427 | } else { | |
5428 | ASSERT(type == ARC_BUFC_DATA); | |
5429 | zio_data_buf_free(buf, size); | |
5430 | } | |
d3c2ae1c GW |
5431 | } |
5432 | ||
5433 | /* | |
5434 | * Free the arc data buffer. | |
5435 | */ | |
5436 | static void | |
a6255b7f | 5437 | arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) |
d3c2ae1c GW |
5438 | { |
5439 | arc_state_t *state = hdr->b_l1hdr.b_state; | |
5440 | arc_buf_contents_t type = arc_buf_type(hdr); | |
5441 | ||
5442 | /* protected by hash lock, if in the hash table */ | |
5443 | if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { | |
5444 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
5445 | ASSERT(state != arc_anon && state != arc_l2c_only); | |
5446 | ||
5447 | (void) refcount_remove_many(&state->arcs_esize[type], | |
5448 | size, tag); | |
5449 | } | |
5450 | (void) refcount_remove_many(&state->arcs_size, size, tag); | |
5451 | ||
5452 | VERIFY3U(hdr->b_type, ==, type); | |
5453 | if (type == ARC_BUFC_METADATA) { | |
d3c2ae1c GW |
5454 | arc_space_return(size, ARC_SPACE_META); |
5455 | } else { | |
5456 | ASSERT(type == ARC_BUFC_DATA); | |
d3c2ae1c GW |
5457 | arc_space_return(size, ARC_SPACE_DATA); |
5458 | } | |
34dc7c2f BB |
5459 | } |
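/*
 * Note the symmetry with arc_get_data_{abd,buf}() above: every
 * arc_space_consume() and refcount_add_many() on the allocation path
 * has its mirror image here, keeping arc_space and the per-state
 * arcs_size / arcs_esize refcounts balanced over a buffer's lifetime.
 */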
5460 | ||
5461 | /* | |
5462 | * This routine is called whenever a buffer is accessed. | |
5463 | * NOTE: the hash lock is dropped in this function. | |
5464 | */ | |
5465 | static void | |
2a432414 | 5466 | arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) |
34dc7c2f | 5467 | { |
428870ff BB |
5468 | clock_t now; |
5469 | ||
34dc7c2f | 5470 | ASSERT(MUTEX_HELD(hash_lock)); |
b9541d6b | 5471 | ASSERT(HDR_HAS_L1HDR(hdr)); |
34dc7c2f | 5472 | |
b9541d6b | 5473 | if (hdr->b_l1hdr.b_state == arc_anon) { |
34dc7c2f BB |
5474 | /* |
5475 | * This buffer is not in the cache, and does not | |
5476 | * appear in our "ghost" list. Add the new buffer | |
5477 | * to the MRU state. | |
5478 | */ | |
5479 | ||
b9541d6b CW |
5480 | ASSERT0(hdr->b_l1hdr.b_arc_access); |
5481 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); | |
2a432414 GW |
5482 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); |
5483 | arc_change_state(arc_mru, hdr, hash_lock); | |
34dc7c2f | 5484 | |
b9541d6b | 5485 | } else if (hdr->b_l1hdr.b_state == arc_mru) { |
428870ff BB |
5486 | now = ddi_get_lbolt(); |
5487 | ||
34dc7c2f BB |
5488 | /* |
5489 | * If this buffer is here because of a prefetch, then either: | |
5490 | * - clear the flag if this is a "referencing" read | |
5491 | * (any subsequent access will bump this into the MFU state). | |
5492 | * or | |
5493 | * - move the buffer to the head of the list if this is | |
5494 | * another prefetch (to make it less likely to be evicted). | |
5495 | */ | |
b9541d6b CW |
5496 | if (HDR_PREFETCH(hdr)) { |
5497 | if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { | |
ca0bf58d PS |
5498 | /* link protected by hash lock */ |
5499 | ASSERT(multilist_link_active( | |
b9541d6b | 5500 | &hdr->b_l1hdr.b_arc_node)); |
34dc7c2f | 5501 | } else { |
d3c2ae1c | 5502 | arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); |
b9541d6b | 5503 | atomic_inc_32(&hdr->b_l1hdr.b_mru_hits); |
34dc7c2f BB |
5504 | ARCSTAT_BUMP(arcstat_mru_hits); |
5505 | } | |
b9541d6b | 5506 | hdr->b_l1hdr.b_arc_access = now; |
34dc7c2f BB |
5507 | return; |
5508 | } | |
5509 | ||
5510 | /* | |
5511 | * This buffer has been "accessed" only once so far, | |
5512 | * but it is still in the cache. Move it to the MFU | |
5513 | * state. | |
5514 | */ | |
b9541d6b CW |
5515 | if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access + |
5516 | ARC_MINTIME)) { | |
34dc7c2f BB |
5517 | /* |
5518 | * More than 125ms have passed since we | |
5519 | * instantiated this buffer. Move it to the | |
5520 | * most frequently used state. | |
5521 | */ | |
b9541d6b | 5522 | hdr->b_l1hdr.b_arc_access = now; |
2a432414 GW |
5523 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
5524 | arc_change_state(arc_mfu, hdr, hash_lock); | |
34dc7c2f | 5525 | } |
b9541d6b | 5526 | atomic_inc_32(&hdr->b_l1hdr.b_mru_hits); |
34dc7c2f | 5527 | ARCSTAT_BUMP(arcstat_mru_hits); |
b9541d6b | 5528 | } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { |
34dc7c2f BB |
5529 | arc_state_t *new_state; |
5530 | /* | |
5531 | * This buffer has been "accessed" recently, but | |
5532 | * was evicted from the cache. Move it to the | |
5533 | * MFU state. | |
5534 | */ | |
5535 | ||
b9541d6b | 5536 | if (HDR_PREFETCH(hdr)) { |
34dc7c2f | 5537 | new_state = arc_mru; |
b9541d6b | 5538 | if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) |
d3c2ae1c | 5539 | arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); |
2a432414 | 5540 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); |
34dc7c2f BB |
5541 | } else { |
5542 | new_state = arc_mfu; | |
2a432414 | 5543 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
34dc7c2f BB |
5544 | } |
5545 | ||
b9541d6b | 5546 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 | 5547 | arc_change_state(new_state, hdr, hash_lock); |
34dc7c2f | 5548 | |
b9541d6b | 5549 | atomic_inc_32(&hdr->b_l1hdr.b_mru_ghost_hits); |
34dc7c2f | 5550 | ARCSTAT_BUMP(arcstat_mru_ghost_hits); |
b9541d6b | 5551 | } else if (hdr->b_l1hdr.b_state == arc_mfu) { |
34dc7c2f BB |
5552 | /* |
5553 | * This buffer has been accessed more than once and is | |
5554 | * still in the cache. Keep it in the MFU state. | |
5555 | * | |
5556 | * NOTE: an add_reference() that occurred when we did | |
5557 | * the arc_read() will have kicked this off the list. | |
5558 | * If it was a prefetch, we will explicitly move it to | |
5559 | * the head of the list now. | |
5560 | */ | |
b9541d6b CW |
5561 | if ((HDR_PREFETCH(hdr)) != 0) { |
5562 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
ca0bf58d PS |
5563 | /* link protected by hash_lock */ |
5564 | ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node)); | |
34dc7c2f | 5565 | } |
b9541d6b | 5566 | atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits); |
34dc7c2f | 5567 | ARCSTAT_BUMP(arcstat_mfu_hits); |
b9541d6b CW |
5568 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
5569 | } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { | |
34dc7c2f BB |
5570 | arc_state_t *new_state = arc_mfu; |
5571 | /* | |
5572 | * This buffer has been accessed more than once but has | |
5573 | * been evicted from the cache. Move it back to the | |
5574 | * MFU state. | |
5575 | */ | |
5576 | ||
b9541d6b | 5577 | if (HDR_PREFETCH(hdr)) { |
34dc7c2f BB |
5578 | /* |
5579 | * This is a prefetch access... | |
5580 | * move this block back to the MRU state. | |
5581 | */ | |
b9541d6b | 5582 | ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); |
34dc7c2f BB |
5583 | new_state = arc_mru; |
5584 | } | |
5585 | ||
b9541d6b | 5586 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 GW |
5587 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
5588 | arc_change_state(new_state, hdr, hash_lock); | |
34dc7c2f | 5589 | |
b9541d6b | 5590 | atomic_inc_32(&hdr->b_l1hdr.b_mfu_ghost_hits); |
34dc7c2f | 5591 | ARCSTAT_BUMP(arcstat_mfu_ghost_hits); |
b9541d6b | 5592 | } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { |
34dc7c2f BB |
5593 | /* |
5594 | * This buffer is on the 2nd Level ARC. | |
5595 | */ | |
5596 | ||
b9541d6b | 5597 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 GW |
5598 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
5599 | arc_change_state(arc_mfu, hdr, hash_lock); | |
34dc7c2f | 5600 | } else { |
b9541d6b CW |
5601 | cmn_err(CE_PANIC, "invalid arc state 0x%p", |
5602 | hdr->b_l1hdr.b_state); | |
34dc7c2f BB |
5603 | } |
5604 | } | |
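/*
 * Summary of the transitions implemented above (an access moves a
 * header "up"; eviction to a ghost list moves it "down"):
 *
 *	anon      -> mru			first access
 *	mru       -> mfu			re-access > ARC_MINTIME later
 *	mru_ghost -> mfu (mru if prefetch)	hit after eviction
 *	mfu       -> mfu			stays put
 *	mfu_ghost -> mfu (mru if prefetch)	hit after eviction
 *	l2c_only  -> mfu			read back in via the L2ARC
 */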
5605 | ||
b5256303 | 5606 | /* a generic arc_read_done_func_t which you can use */ |
34dc7c2f BB |
5607 | /* ARGSUSED */ |
5608 | void | |
b5256303 | 5609 | arc_bcopy_func(zio_t *zio, int error, arc_buf_t *buf, void *arg) |
34dc7c2f | 5610 | { |
b5256303 | 5611 | if (error == 0) |
2aa34383 | 5612 | bcopy(buf->b_data, arg, arc_buf_size(buf)); |
d3c2ae1c | 5613 | arc_buf_destroy(buf, arg); |
34dc7c2f BB |
5614 | } |
5615 | ||
b5256303 | 5616 | /* a generic arc_read_done_func_t */ |
34dc7c2f | 5617 | void |
b5256303 | 5618 | arc_getbuf_func(zio_t *zio, int error, arc_buf_t *buf, void *arg) |
34dc7c2f BB |
5619 | { |
5620 | arc_buf_t **bufp = arg; | |
b5256303 | 5621 | if (error != 0) { |
d3c2ae1c | 5622 | arc_buf_destroy(buf, arg); |
34dc7c2f BB |
5623 | *bufp = NULL; |
5624 | } else { | |
5625 | *bufp = buf; | |
428870ff | 5626 | ASSERT(buf->b_data); |
34dc7c2f BB |
5627 | } |
5628 | } | |
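/*
 * A minimal usage sketch for the callback above: a blocking read of a
 * single block into a caller-owned arc_buf_t via arc_getbuf_func().
 * The function name example_read_block is hypothetical and the error
 * handling is trimmed; see arc_read() below for the full contract.
 */
static int
example_read_block(spa_t *spa, const blkptr_t *bp, const zbookmark_phys_t *zb)
{
	arc_buf_t *buf = NULL;
	arc_flags_t aflags = ARC_FLAG_WAIT;
	int err;

	/* NULL pio: no parent zio; ARC_FLAG_WAIT: block until done */
	err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
	if (err == 0 && buf != NULL) {
		/* ... consume buf->b_data ... */
		arc_buf_destroy(buf, &buf);
	}
	return (err);
}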
5629 | ||
d3c2ae1c GW |
5630 | static void |
5631 | arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) | |
5632 | { | |
5633 | if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { | |
5634 | ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); | |
b5256303 | 5635 | ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF); |
d3c2ae1c GW |
5636 | } else { |
5637 | if (HDR_COMPRESSION_ENABLED(hdr)) { | |
b5256303 | 5638 | ASSERT3U(arc_hdr_get_compress(hdr), ==, |
d3c2ae1c GW |
5639 | BP_GET_COMPRESS(bp)); |
5640 | } | |
5641 | ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); | |
5642 | ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); | |
b5256303 | 5643 | ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp)); |
d3c2ae1c GW |
5644 | } |
5645 | } | |
5646 | ||
34dc7c2f BB |
5647 | static void |
5648 | arc_read_done(zio_t *zio) | |
5649 | { | |
b5256303 | 5650 | blkptr_t *bp = zio->io_bp; |
d3c2ae1c | 5651 | arc_buf_hdr_t *hdr = zio->io_private; |
9b67f605 | 5652 | kmutex_t *hash_lock = NULL; |
524b4217 DK |
5653 | arc_callback_t *callback_list; |
5654 | arc_callback_t *acb; | |
2aa34383 | 5655 | boolean_t freeable = B_FALSE; |
524b4217 | 5656 | boolean_t no_zio_error = (zio->io_error == 0); |
a7004725 | 5657 | |
34dc7c2f BB |
5658 | /* |
5659 | * The hdr was inserted into hash-table and removed from lists | |
5660 | * prior to starting I/O. We should find this header, since | |
5661 | * it's in the hash table, and it should be legit since it's | |
5662 | * not possible to evict it during the I/O. The only possible | |
5663 | * reason for it not to be found is if we were freed during the | |
5664 | * read. | |
5665 | */ | |
9b67f605 MA |
5666 | if (HDR_IN_HASH_TABLE(hdr)) { |
5667 | arc_buf_hdr_t *found; | |
5668 | ||
5669 | ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); | |
5670 | ASSERT3U(hdr->b_dva.dva_word[0], ==, | |
5671 | BP_IDENTITY(zio->io_bp)->dva_word[0]); | |
5672 | ASSERT3U(hdr->b_dva.dva_word[1], ==, | |
5673 | BP_IDENTITY(zio->io_bp)->dva_word[1]); | |
5674 | ||
d3c2ae1c | 5675 | found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock); |
9b67f605 | 5676 | |
d3c2ae1c | 5677 | ASSERT((found == hdr && |
9b67f605 MA |
5678 | DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || |
5679 | (found == hdr && HDR_L2_READING(hdr))); | |
d3c2ae1c GW |
5680 | ASSERT3P(hash_lock, !=, NULL); |
5681 | } | |
5682 | ||
b5256303 TC |
5683 | if (BP_IS_PROTECTED(bp)) { |
5684 | hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp); | |
5685 | hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset; | |
5686 | zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt, | |
5687 | hdr->b_crypt_hdr.b_iv); | |
5688 | ||
5689 | if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) { | |
5690 | void *tmpbuf; | |
5691 | ||
5692 | tmpbuf = abd_borrow_buf_copy(zio->io_abd, | |
5693 | sizeof (zil_chain_t)); | |
5694 | zio_crypt_decode_mac_zil(tmpbuf, | |
5695 | hdr->b_crypt_hdr.b_mac); | |
5696 | abd_return_buf(zio->io_abd, tmpbuf, | |
5697 | sizeof (zil_chain_t)); | |
5698 | } else { | |
5699 | zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac); | |
5700 | } | |
5701 | } | |
5702 | ||
524b4217 | 5703 | if (no_zio_error) { |
d3c2ae1c GW |
5704 | /* byteswap if necessary */ |
5705 | if (BP_SHOULD_BYTESWAP(zio->io_bp)) { | |
5706 | if (BP_GET_LEVEL(zio->io_bp) > 0) { | |
5707 | hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; | |
5708 | } else { | |
5709 | hdr->b_l1hdr.b_byteswap = | |
5710 | DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); | |
5711 | } | |
5712 | } else { | |
5713 | hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; | |
5714 | } | |
9b67f605 | 5715 | } |
34dc7c2f | 5716 | |
d3c2ae1c | 5717 | arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); |
b9541d6b | 5718 | if (l2arc_noprefetch && HDR_PREFETCH(hdr)) |
d3c2ae1c | 5719 | arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); |
34dc7c2f | 5720 | |
b9541d6b | 5721 | callback_list = hdr->b_l1hdr.b_acb; |
d3c2ae1c | 5722 | ASSERT3P(callback_list, !=, NULL); |
34dc7c2f | 5723 | |
524b4217 | 5724 | if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) { |
428870ff BB |
5725 | /* |
5726 | * Only call arc_access on anonymous buffers. This is because | |
5727 | * if we've issued an I/O for an evicted buffer, we've already | |
5728 | * called arc_access (to prevent any simultaneous readers from | |
5729 | * getting confused). | |
5730 | */ | |
5731 | arc_access(hdr, hash_lock); | |
5732 | } | |
5733 | ||
524b4217 DK |
5734 | /* |
5735 | * If a read request has a callback (i.e. acb_done is not NULL), then we | |
5736 | * make a buf containing the data according to the parameters which were | |
5737 | * passed in. The implementation of arc_buf_alloc_impl() ensures that we | |
5738 | * aren't needlessly decompressing the data multiple times. | |
5739 | */ | |
a7004725 | 5740 | int callback_cnt = 0; |
2aa34383 DK |
5741 | for (acb = callback_list; acb != NULL; acb = acb->acb_next) { |
5742 | if (!acb->acb_done) | |
5743 | continue; | |
5744 | ||
524b4217 | 5745 | /* This is a demand read since prefetches don't use callbacks */ |
2aa34383 | 5746 | callback_cnt++; |
524b4217 | 5747 | |
b5256303 TC |
5748 | int error = arc_buf_alloc_impl(hdr, zio->io_spa, |
5749 | zio->io_bookmark.zb_objset, acb->acb_private, | |
5750 | acb->acb_encrypted, acb->acb_compressed, acb->acb_noauth, | |
5751 | no_zio_error, &acb->acb_buf); | |
5752 | ||
5753 | /* | |
5754 | * assert non-speculative zios didn't fail because an | |
5755 | * encryption key wasn't loaded | |
5756 | */ | |
5757 | ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) || | |
5758 | error == 0 || error != ENOENT); | |
5759 | ||
5760 | /* | |
5761 | * If we failed to decrypt, report an error now (as the zio | |
5762 | * layer would have done if it had done the transforms). | |
5763 | */ | |
5764 | if (error == ECKSUM) { | |
5765 | ASSERT(BP_IS_PROTECTED(bp)); | |
5766 | error = SET_ERROR(EIO); | |
5767 | spa_log_error(zio->io_spa, &zio->io_bookmark); | |
5768 | if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) { | |
5769 | zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION, | |
5770 | zio->io_spa, NULL, &zio->io_bookmark, zio, | |
5771 | 0, 0); | |
5772 | } | |
5773 | } | |
5774 | ||
524b4217 DK |
5775 | if (no_zio_error) { |
5776 | zio->io_error = error; | |
34dc7c2f BB |
5777 | } |
5778 | } | |
b9541d6b | 5779 | hdr->b_l1hdr.b_acb = NULL; |
d3c2ae1c | 5780 | arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); |
2aa34383 | 5781 | if (callback_cnt == 0) { |
b5256303 TC |
5782 | ASSERT(HDR_PREFETCH(hdr) || HDR_HAS_RABD(hdr)); |
5783 | ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); | |
428870ff | 5784 | } |
34dc7c2f | 5785 | |
b9541d6b CW |
5786 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || |
5787 | callback_list != NULL); | |
34dc7c2f | 5788 | |
524b4217 | 5789 | if (no_zio_error) { |
d3c2ae1c GW |
5790 | arc_hdr_verify(hdr, zio->io_bp); |
5791 | } else { | |
5792 | arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); | |
b9541d6b | 5793 | if (hdr->b_l1hdr.b_state != arc_anon) |
34dc7c2f BB |
5794 | arc_change_state(arc_anon, hdr, hash_lock); |
5795 | if (HDR_IN_HASH_TABLE(hdr)) | |
5796 | buf_hash_remove(hdr); | |
b9541d6b | 5797 | freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); |
34dc7c2f BB |
5798 | } |
5799 | ||
5800 | /* | |
5801 | * Broadcast before we drop the hash_lock to avoid the possibility | |
5802 | * that the hdr (and hence the cv) might be freed before we get to | |
5803 | * the cv_broadcast(). | |
5804 | */ | |
b9541d6b | 5805 | cv_broadcast(&hdr->b_l1hdr.b_cv); |
34dc7c2f | 5806 | |
b9541d6b | 5807 | if (hash_lock != NULL) { |
34dc7c2f BB |
5808 | mutex_exit(hash_lock); |
5809 | } else { | |
5810 | /* | |
5811 | * This block was freed while we waited for the read to | |
5812 | * complete. It has been removed from the hash table and | |
5813 | * moved to the anonymous state (so that it won't show up | |
5814 | * in the cache). | |
5815 | */ | |
b9541d6b CW |
5816 | ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); |
5817 | freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); | |
34dc7c2f BB |
5818 | } |
5819 | ||
5820 | /* execute each callback and free its structure */ | |
5821 | while ((acb = callback_list) != NULL) { | |
b5256303 TC |
5822 | if (acb->acb_done) { |
5823 | acb->acb_done(zio, zio->io_error, acb->acb_buf, | |
5824 | acb->acb_private); | |
5825 | } | |
34dc7c2f BB |
5826 | |
5827 | if (acb->acb_zio_dummy != NULL) { | |
5828 | acb->acb_zio_dummy->io_error = zio->io_error; | |
5829 | zio_nowait(acb->acb_zio_dummy); | |
5830 | } | |
5831 | ||
5832 | callback_list = acb->acb_next; | |
5833 | kmem_free(acb, sizeof (arc_callback_t)); | |
5834 | } | |
5835 | ||
5836 | if (freeable) | |
5837 | arc_hdr_destroy(hdr); | |
5838 | } | |
5839 | ||
5840 | /* | |
5c839890 | 5841 | * "Read" the block at the specified DVA (in bp) via the |
34dc7c2f BB |
5842 | * cache. If the block is found in the cache, invoke the provided |
5843 | * callback immediately and return. Note that the `zio' parameter | |
5844 | * in the callback will be NULL in this case, since no IO was | |
5845 | * required. If the block is not in the cache pass the read request | |
5846 | * on to the spa with a substitute callback function, so that the | |
5847 | * requested block will be added to the cache. | |
5848 | * | |
5849 | * If a read request arrives for a block that has a read in-progress, | |
5850 | * either wait for the in-progress read to complete (and return the | |
5851 | * results); or, if this is a read with a "done" func, add a record | |
5852 | * to the read to invoke the "done" func when the read completes, | |
5853 | * and return; or just return. | |
5854 | * | |
5855 | * arc_read_done() will invoke all the requested "done" functions | |
5856 | * for readers of this block. | |
5857 | */ | |
5858 | int | |
b5256303 TC |
5859 | arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, |
5860 | arc_read_done_func_t *done, void *private, zio_priority_t priority, | |
5861 | int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb) | |
34dc7c2f | 5862 | { |
9b67f605 | 5863 | arc_buf_hdr_t *hdr = NULL; |
9b67f605 | 5864 | kmutex_t *hash_lock = NULL; |
34dc7c2f | 5865 | zio_t *rzio; |
3541dc6d | 5866 | uint64_t guid = spa_load_guid(spa); |
b5256303 TC |
5867 | boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0; |
5868 | boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) && | |
5869 | (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; | |
5870 | boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) && | |
5871 | (zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0; | |
1421c891 | 5872 | int rc = 0; |
34dc7c2f | 5873 | |
9b67f605 MA |
5874 | ASSERT(!BP_IS_EMBEDDED(bp) || |
5875 | BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); | |
5876 | ||
34dc7c2f | 5877 | top: |
9b67f605 MA |
5878 | if (!BP_IS_EMBEDDED(bp)) { |
5879 | /* | |
5880 | * Embedded BP's have no DVA and require no I/O to "read". | |
5881 | * Create an anonymous arc buf to back it. | |
5882 | */ | |
5883 | hdr = buf_hash_find(guid, bp, &hash_lock); | |
5884 | } | |
5885 | ||
b5256303 TC |
5886 | /* |
5887 | * Determine if we have an L1 cache hit or a cache miss. For simplicity | |
5888 | * we maintain encrypted data separately from compressed / uncompressed
5889 | * data. If the user is requesting raw encrypted data and we don't have
5890 | * that in the header, we will read from disk to guarantee that we can
5891 | * get it even if the encryption keys aren't loaded. | |
5892 | */ | |
5893 | if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) || | |
5894 | (hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) { | |
d3c2ae1c | 5895 | arc_buf_t *buf = NULL; |
2a432414 | 5896 | *arc_flags |= ARC_FLAG_CACHED; |
34dc7c2f BB |
5897 | |
5898 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
5899 | ||
7f60329a MA |
5900 | if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && |
5901 | priority == ZIO_PRIORITY_SYNC_READ) { | |
5902 | /* | |
5903 | * This sync read must wait for an | |
5904 | * in-progress async read (e.g. a predictive | |
5905 | * prefetch). Async reads are queued | |
5906 | * separately at the vdev_queue layer, so | |
5907 | * this is a form of priority inversion. | |
5908 | * Ideally, we would "inherit" the demand | |
5909 | * i/o's priority by moving the i/o from | |
5910 | * the async queue to the synchronous queue, | |
5911 | * but there is currently no mechanism to do | |
5912 | * so. Track this so that we can evaluate | |
5913 | * the magnitude of this potential performance | |
5914 | * problem. | |
5915 | * | |
5916 | * Note that if the prefetch i/o is already | |
5917 | * active (has been issued to the device), | |
5918 | * the prefetch improved performance, because | |
5919 | * we issued it sooner than we would have | |
5920 | * without the prefetch. | |
5921 | */ | |
5922 | DTRACE_PROBE1(arc__sync__wait__for__async, | |
5923 | arc_buf_hdr_t *, hdr); | |
5924 | ARCSTAT_BUMP(arcstat_sync_wait_for_async); | |
5925 | } | |
5926 | if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { | |
d3c2ae1c GW |
5927 | arc_hdr_clear_flags(hdr, |
5928 | ARC_FLAG_PREDICTIVE_PREFETCH); | |
7f60329a MA |
5929 | } |
5930 | ||
2a432414 | 5931 | if (*arc_flags & ARC_FLAG_WAIT) { |
b9541d6b | 5932 | cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); |
34dc7c2f BB |
5933 | mutex_exit(hash_lock); |
5934 | goto top; | |
5935 | } | |
2a432414 | 5936 | ASSERT(*arc_flags & ARC_FLAG_NOWAIT); |
34dc7c2f BB |
5937 | |
5938 | if (done) { | |
7f60329a | 5939 | arc_callback_t *acb = NULL; |
34dc7c2f BB |
5940 | |
5941 | acb = kmem_zalloc(sizeof (arc_callback_t), | |
79c76d5b | 5942 | KM_SLEEP); |
34dc7c2f BB |
5943 | acb->acb_done = done; |
5944 | acb->acb_private = private; | |
a7004725 | 5945 | acb->acb_compressed = compressed_read; |
34dc7c2f BB |
5946 | if (pio != NULL) |
5947 | acb->acb_zio_dummy = zio_null(pio, | |
d164b209 | 5948 | spa, NULL, NULL, NULL, zio_flags); |
34dc7c2f | 5949 | |
d3c2ae1c | 5950 | ASSERT3P(acb->acb_done, !=, NULL); |
b9541d6b CW |
5951 | acb->acb_next = hdr->b_l1hdr.b_acb; |
5952 | hdr->b_l1hdr.b_acb = acb; | |
34dc7c2f | 5953 | mutex_exit(hash_lock); |
1421c891 | 5954 | goto out; |
34dc7c2f BB |
5955 | } |
5956 | mutex_exit(hash_lock); | |
1421c891 | 5957 | goto out; |
34dc7c2f BB |
5958 | } |
5959 | ||
b9541d6b CW |
5960 | ASSERT(hdr->b_l1hdr.b_state == arc_mru || |
5961 | hdr->b_l1hdr.b_state == arc_mfu); | |
34dc7c2f BB |
5962 | |
5963 | if (done) { | |
7f60329a MA |
5964 | if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { |
5965 | /* | |
5966 | * This is a demand read which does not have to | |
5967 | * wait for i/o because we did a predictive | |
5968 | * prefetch i/o for it, which has completed. | |
5969 | */ | |
5970 | DTRACE_PROBE1( | |
5971 | arc__demand__hit__predictive__prefetch, | |
5972 | arc_buf_hdr_t *, hdr); | |
5973 | ARCSTAT_BUMP( | |
5974 | arcstat_demand_hit_predictive_prefetch); | |
d3c2ae1c GW |
5975 | arc_hdr_clear_flags(hdr, |
5976 | ARC_FLAG_PREDICTIVE_PREFETCH); | |
7f60329a | 5977 | } |
d3c2ae1c GW |
5978 | ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp)); |
5979 | ||
524b4217 | 5980 | /* Get a buf with the desired data in it. */ |
b5256303 TC |
5981 | rc = arc_buf_alloc_impl(hdr, spa, zb->zb_objset, |
5982 | private, encrypted_read, compressed_read, | |
5983 | noauth_read, B_TRUE, &buf); | |
5984 | ||
5985 | ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) || | |
5986 | rc == 0 || rc != ENOENT); | |
2a432414 | 5987 | } else if (*arc_flags & ARC_FLAG_PREFETCH && |
b9541d6b | 5988 | refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { |
d3c2ae1c | 5989 | arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); |
34dc7c2f BB |
5990 | } |
5991 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); | |
5992 | arc_access(hdr, hash_lock); | |
2a432414 | 5993 | if (*arc_flags & ARC_FLAG_L2CACHE) |
d3c2ae1c | 5994 | arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); |
34dc7c2f BB |
5995 | mutex_exit(hash_lock); |
5996 | ARCSTAT_BUMP(arcstat_hits); | |
b9541d6b CW |
5997 | ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), |
5998 | demand, prefetch, !HDR_ISTYPE_METADATA(hdr), | |
34dc7c2f BB |
5999 | data, metadata, hits); |
6000 | ||
6001 | if (done) | |
b5256303 | 6002 | done(NULL, rc, buf, private); |
34dc7c2f | 6003 | } else { |
d3c2ae1c GW |
6004 | uint64_t lsize = BP_GET_LSIZE(bp); |
6005 | uint64_t psize = BP_GET_PSIZE(bp); | |
9b67f605 | 6006 | arc_callback_t *acb; |
b128c09f | 6007 | vdev_t *vd = NULL; |
a117a6d6 | 6008 | uint64_t addr = 0; |
d164b209 | 6009 | boolean_t devw = B_FALSE; |
d3c2ae1c | 6010 | uint64_t size; |
b5256303 | 6011 | void *hdr_abd; |
34dc7c2f | 6012 | |
5f6d0b6f BB |
6013 | /* |
6014 | * Gracefully handle a damaged logical block size as a | |
1cdb86cb | 6015 | * checksum error. |
5f6d0b6f | 6016 | */ |
d3c2ae1c | 6017 | if (lsize > spa_maxblocksize(spa)) { |
1cdb86cb | 6018 | rc = SET_ERROR(ECKSUM); |
5f6d0b6f BB |
6019 | goto out; |
6020 | } | |
6021 | ||
34dc7c2f BB |
6022 | if (hdr == NULL) { |
6023 | /* this block is not in the cache */ | |
9b67f605 | 6024 | arc_buf_hdr_t *exists = NULL; |
34dc7c2f | 6025 | arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); |
d3c2ae1c | 6026 | hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, |
b5256303 TC |
6027 | BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), type, |
6028 | encrypted_read); | |
d3c2ae1c | 6029 | |
9b67f605 MA |
6030 | if (!BP_IS_EMBEDDED(bp)) { |
6031 | hdr->b_dva = *BP_IDENTITY(bp); | |
6032 | hdr->b_birth = BP_PHYSICAL_BIRTH(bp); | |
9b67f605 MA |
6033 | exists = buf_hash_insert(hdr, &hash_lock); |
6034 | } | |
6035 | if (exists != NULL) { | |
34dc7c2f BB |
6036 | /* somebody beat us to the hash insert */ |
6037 | mutex_exit(hash_lock); | |
428870ff | 6038 | buf_discard_identity(hdr); |
d3c2ae1c | 6039 | arc_hdr_destroy(hdr); |
34dc7c2f BB |
6040 | goto top; /* restart the IO request */ |
6041 | } | |
34dc7c2f | 6042 | } else { |
b9541d6b | 6043 | /* |
b5256303 TC |
6044 | * This block is in the ghost cache or encrypted data |
6045 | * was requested and we didn't have it. If it was | |
6046 | * L2-only (and thus didn't have an L1 hdr), | |
6047 | * we realloc the header to add an L1 hdr. | |
b9541d6b CW |
6048 | */ |
6049 | if (!HDR_HAS_L1HDR(hdr)) { | |
6050 | hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, | |
6051 | hdr_full_cache); | |
6052 | } | |
6053 | ||
b5256303 TC |
6054 | if (GHOST_STATE(hdr->b_l1hdr.b_state)) { |
6055 | ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); | |
6056 | ASSERT(!HDR_HAS_RABD(hdr)); | |
6057 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
6058 | ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); | |
6059 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); | |
6060 | ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); | |
6061 | } else if (HDR_IO_IN_PROGRESS(hdr)) { | |
6062 | /* | |
6063 | * If this header already had an IO in progress | |
6064 | * and we are performing another IO to fetch | |
6065 | * encrypted data we must wait until the first | |
6066 | * IO completes so as not to confuse | |
6067 | * arc_read_done(). This should be very rare | |
6068 | * and so the performance impact shouldn't | |
6069 | * matter. | |
6070 | */ | |
6071 | cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); | |
6072 | mutex_exit(hash_lock); | |
6073 | goto top; | |
6074 | } | |
34dc7c2f | 6075 | |
7f60329a | 6076 | /* |
d3c2ae1c | 6077 | * This is a delicate dance that we play here. |
b5256303 TC |
6078 | * This hdr might be in the ghost list so we access |
6079 | * it to move it out of the ghost list before we | |
d3c2ae1c GW |
6080 | * initiate the read. If it's a prefetch then |
6081 | * it won't have a callback so we'll remove the | |
6082 | * reference that arc_buf_alloc_impl() created. We | |
6083 | * do this after we've called arc_access() to | |
6084 | * avoid hitting an assert in remove_reference(). | |
7f60329a | 6085 | */ |
428870ff | 6086 | arc_access(hdr, hash_lock); |
b5256303 | 6087 | arc_hdr_alloc_abd(hdr, encrypted_read); |
d3c2ae1c | 6088 | } |
d3c2ae1c | 6089 | |
b5256303 TC |
6090 | if (encrypted_read) { |
6091 | ASSERT(HDR_HAS_RABD(hdr)); | |
6092 | size = HDR_GET_PSIZE(hdr); | |
6093 | hdr_abd = hdr->b_crypt_hdr.b_rabd; | |
d3c2ae1c | 6094 | zio_flags |= ZIO_FLAG_RAW; |
b5256303 TC |
6095 | } else { |
6096 | ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); | |
6097 | size = arc_hdr_size(hdr); | |
6098 | hdr_abd = hdr->b_l1hdr.b_pabd; | |
6099 | ||
6100 | if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) { | |
6101 | zio_flags |= ZIO_FLAG_RAW_COMPRESS; | |
6102 | } | |
6103 | ||
6104 | /* | |
6105 | * For authenticated bp's, we do not ask the ZIO layer | |
6106 | * to authenticate them since this will cause the entire | |
6107 | * IO to fail if the key isn't loaded. Instead, we | |
6108 | * defer authentication until arc_buf_fill(), which will | |
6109 | * verify the data when the key is available. | |
6110 | */ | |
6111 | if (BP_IS_AUTHENTICATED(bp)) | |
6112 | zio_flags |= ZIO_FLAG_RAW_ENCRYPT; | |
34dc7c2f BB |
6113 | } |
6114 | ||
b5256303 TC |
6115 | if (*arc_flags & ARC_FLAG_PREFETCH && |
6116 | refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) | |
d3c2ae1c GW |
6117 | arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); |
6118 | if (*arc_flags & ARC_FLAG_L2CACHE) | |
6119 | arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); | |
b5256303 TC |
6120 | if (BP_IS_AUTHENTICATED(bp)) |
6121 | arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH); | |
d3c2ae1c GW |
6122 | if (BP_GET_LEVEL(bp) > 0) |
6123 | arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); | |
7f60329a | 6124 | if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) |
d3c2ae1c | 6125 | arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); |
b9541d6b | 6126 | ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); |
428870ff | 6127 | |
79c76d5b | 6128 | acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); |
34dc7c2f BB |
6129 | acb->acb_done = done; |
6130 | acb->acb_private = private; | |
2aa34383 | 6131 | acb->acb_compressed = compressed_read; |
b5256303 TC |
6132 | acb->acb_encrypted = encrypted_read; |
6133 | acb->acb_noauth = noauth_read; | |
34dc7c2f | 6134 | |
d3c2ae1c | 6135 | ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); |
b9541d6b | 6136 | hdr->b_l1hdr.b_acb = acb; |
d3c2ae1c | 6137 | arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); |
34dc7c2f | 6138 | |
b9541d6b CW |
6139 | if (HDR_HAS_L2HDR(hdr) && |
6140 | (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { | |
6141 | devw = hdr->b_l2hdr.b_dev->l2ad_writing; | |
6142 | addr = hdr->b_l2hdr.b_daddr; | |
b128c09f BB |
6143 | /* |
6144 | * Lock out device removal. | |
6145 | */ | |
6146 | if (vdev_is_dead(vd) || | |
6147 | !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) | |
6148 | vd = NULL; | |
6149 | } | |
6150 | ||
d3c2ae1c GW |
6151 | if (priority == ZIO_PRIORITY_ASYNC_READ) |
6152 | arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); | |
6153 | else | |
6154 | arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); | |
6155 | ||
9b67f605 MA |
6156 | if (hash_lock != NULL) |
6157 | mutex_exit(hash_lock); | |
b128c09f | 6158 | |
e49f1e20 WA |
6159 | /* |
6160 | * At this point, we have a level 1 cache miss. Try again in | |
6161 | * L2ARC if possible. | |
6162 | */ | |
d3c2ae1c GW |
6163 | ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); |
6164 | ||
428870ff | 6165 | DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, |
d3c2ae1c | 6166 | uint64_t, lsize, zbookmark_phys_t *, zb); |
34dc7c2f | 6167 | ARCSTAT_BUMP(arcstat_misses); |
b9541d6b CW |
6168 | ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), |
6169 | demand, prefetch, !HDR_ISTYPE_METADATA(hdr), | |
34dc7c2f BB |
6170 | data, metadata, misses); |
6171 | ||
d164b209 | 6172 | if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { |
34dc7c2f BB |
6173 | /* |
6174 | * Read from the L2ARC if the following are true: | |
b128c09f BB |
6175 | * 1. The L2ARC vdev was previously cached. |
6176 | * 2. This buffer still has L2ARC metadata. | |
6177 | * 3. This buffer isn't currently writing to the L2ARC. | |
6178 | * 4. The L2ARC entry wasn't evicted, which may | |
6179 | * also have invalidated the vdev. | |
d164b209 | 6180 | * 5. This isn't a prefetch with l2arc_noprefetch enabled.
34dc7c2f | 6181 | */ |
b9541d6b | 6182 | if (HDR_HAS_L2HDR(hdr) && |
d164b209 BB |
6183 | !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && |
6184 | !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { | |
34dc7c2f | 6185 | l2arc_read_callback_t *cb; |
82710e99 GDN |
6186 | abd_t *abd; |
6187 | uint64_t asize; | |
34dc7c2f BB |
6188 | |
6189 | DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); | |
6190 | ARCSTAT_BUMP(arcstat_l2_hits); | |
b9541d6b | 6191 | atomic_inc_32(&hdr->b_l2hdr.b_hits); |
34dc7c2f | 6192 | |
34dc7c2f | 6193 | cb = kmem_zalloc(sizeof (l2arc_read_callback_t), |
79c76d5b | 6194 | KM_SLEEP); |
d3c2ae1c | 6195 | cb->l2rcb_hdr = hdr; |
34dc7c2f BB |
6196 | cb->l2rcb_bp = *bp; |
6197 | cb->l2rcb_zb = *zb; | |
b128c09f | 6198 | cb->l2rcb_flags = zio_flags; |
34dc7c2f | 6199 | |
82710e99 GDN |
6200 | asize = vdev_psize_to_asize(vd, size); |
6201 | if (asize != size) { | |
6202 | abd = abd_alloc_for_io(asize, | |
6203 | HDR_ISTYPE_METADATA(hdr)); | |
6204 | cb->l2rcb_abd = abd; | |
6205 | } else { | |
b5256303 | 6206 | abd = hdr_abd; |
82710e99 GDN |
6207 | } |
6208 | ||
a117a6d6 | 6209 | ASSERT(addr >= VDEV_LABEL_START_SIZE && |
82710e99 | 6210 | addr + asize <= vd->vdev_psize - |
a117a6d6 GW |
6211 | VDEV_LABEL_END_SIZE); |
6212 | ||
34dc7c2f | 6213 | /* |
b128c09f BB |
6214 | * l2arc read. The SCL_L2ARC lock will be |
6215 | * released by l2arc_read_done(). | |
3a17a7a9 SK |
6216 | * Issue a null zio if the underlying buffer |
6217 | * was squashed to zero size by compression. | |
34dc7c2f | 6218 | */ |
b5256303 | 6219 | ASSERT3U(arc_hdr_get_compress(hdr), !=, |
d3c2ae1c GW |
6220 | ZIO_COMPRESS_EMPTY); |
6221 | rzio = zio_read_phys(pio, vd, addr, | |
82710e99 | 6222 | asize, abd, |
d3c2ae1c GW |
6223 | ZIO_CHECKSUM_OFF, |
6224 | l2arc_read_done, cb, priority, | |
6225 | zio_flags | ZIO_FLAG_DONT_CACHE | | |
6226 | ZIO_FLAG_CANFAIL | | |
6227 | ZIO_FLAG_DONT_PROPAGATE | | |
6228 | ZIO_FLAG_DONT_RETRY, B_FALSE); | |
6229 | ||
34dc7c2f BB |
6230 | DTRACE_PROBE2(l2arc__read, vdev_t *, vd, |
6231 | zio_t *, rzio); | |
b5256303 TC |
6232 | ARCSTAT_INCR(arcstat_l2_read_bytes, |
6233 | HDR_GET_PSIZE(hdr)); | |
34dc7c2f | 6234 | |
2a432414 | 6235 | if (*arc_flags & ARC_FLAG_NOWAIT) { |
b128c09f | 6236 | zio_nowait(rzio); |
1421c891 | 6237 | goto out; |
b128c09f | 6238 | } |
34dc7c2f | 6239 | |
2a432414 | 6240 | ASSERT(*arc_flags & ARC_FLAG_WAIT); |
b128c09f | 6241 | if (zio_wait(rzio) == 0) |
1421c891 | 6242 | goto out; |
b128c09f BB |
6243 | |
6244 | /* l2arc read error; goto zio_read() */ | |
34dc7c2f BB |
6245 | } else { |
6246 | DTRACE_PROBE1(l2arc__miss, | |
6247 | arc_buf_hdr_t *, hdr); | |
6248 | ARCSTAT_BUMP(arcstat_l2_misses); | |
6249 | if (HDR_L2_WRITING(hdr)) | |
6250 | ARCSTAT_BUMP(arcstat_l2_rw_clash); | |
b128c09f | 6251 | spa_config_exit(spa, SCL_L2ARC, vd); |
34dc7c2f | 6252 | } |
d164b209 BB |
6253 | } else { |
6254 | if (vd != NULL) | |
6255 | spa_config_exit(spa, SCL_L2ARC, vd); | |
6256 | if (l2arc_ndev != 0) { | |
6257 | DTRACE_PROBE1(l2arc__miss, | |
6258 | arc_buf_hdr_t *, hdr); | |
6259 | ARCSTAT_BUMP(arcstat_l2_misses); | |
6260 | } | |
34dc7c2f | 6261 | } |
34dc7c2f | 6262 | |
b5256303 | 6263 | rzio = zio_read(pio, spa, bp, hdr_abd, size, |
d3c2ae1c | 6264 | arc_read_done, hdr, priority, zio_flags, zb); |
34dc7c2f | 6265 | |
2a432414 | 6266 | if (*arc_flags & ARC_FLAG_WAIT) { |
1421c891 PS |
6267 | rc = zio_wait(rzio); |
6268 | goto out; | |
6269 | } | |
34dc7c2f | 6270 | |
2a432414 | 6271 | ASSERT(*arc_flags & ARC_FLAG_NOWAIT); |
34dc7c2f BB |
6272 | zio_nowait(rzio); |
6273 | } | |
1421c891 PS |
6274 | |
6275 | out: | |
6276 | spa_read_history_add(spa, zb, *arc_flags); | |
6277 | return (rc); | |
34dc7c2f BB |
6278 | } |
6279 | ||
ab26409d BB |
6280 | arc_prune_t * |
6281 | arc_add_prune_callback(arc_prune_func_t *func, void *private) | |
6282 | { | |
6283 | arc_prune_t *p; | |
6284 | ||
d1d7e268 | 6285 | p = kmem_alloc(sizeof (*p), KM_SLEEP); |
ab26409d BB |
6286 | p->p_pfunc = func; |
6287 | p->p_private = private; | |
6288 | list_link_init(&p->p_node); | |
6289 | refcount_create(&p->p_refcnt); | |
6290 | ||
6291 | mutex_enter(&arc_prune_mtx); | |
6292 | refcount_add(&p->p_refcnt, &arc_prune_list); | |
6293 | list_insert_head(&arc_prune_list, p); | |
6294 | mutex_exit(&arc_prune_mtx); | |
6295 | ||
6296 | return (p); | |
6297 | } | |
6298 | ||
6299 | void | |
6300 | arc_remove_prune_callback(arc_prune_t *p) | |
6301 | { | |
4442f60d | 6302 | boolean_t wait = B_FALSE; |
ab26409d BB |
6303 | mutex_enter(&arc_prune_mtx); |
6304 | list_remove(&arc_prune_list, p); | |
4442f60d CC |
6305 | if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0) |
6306 | wait = B_TRUE; | |
ab26409d | 6307 | mutex_exit(&arc_prune_mtx); |
4442f60d CC |
6308 | |
6309 | /* wait for arc_prune_task to finish */ | |
6310 | if (wait) | |
6311 | taskq_wait_outstanding(arc_prune_taskq, 0); | |
6312 | ASSERT0(refcount_count(&p->p_refcnt)); | |
6313 | refcount_destroy(&p->p_refcnt); | |
6314 | kmem_free(p, sizeof (*p)); | |
ab26409d BB |
6315 | } |
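/*
 * Usage sketch for the prune API above. A hypothetical consumer
 * registers a callback at init time and tears it down at fini; the
 * callback runs asynchronously from arc_prune_taskq with the number
 * of objects the ARC would like it to release.
 */
static void
example_prune_func(int64_t nr_to_scan, void *private)
{
	/* ... drop up to nr_to_scan cached objects ... */
}

static arc_prune_t *example_prune;

static void
example_init(void)
{
	example_prune = arc_add_prune_callback(example_prune_func, NULL);
}

static void
example_fini(void)
{
	arc_remove_prune_callback(example_prune);
}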
6316 | ||
df4474f9 MA |
6317 | /* |
6318 | * Notify the arc that a block was freed, and thus will never be used again. | |
6319 | */ | |
6320 | void | |
6321 | arc_freed(spa_t *spa, const blkptr_t *bp) | |
6322 | { | |
6323 | arc_buf_hdr_t *hdr; | |
6324 | kmutex_t *hash_lock; | |
6325 | uint64_t guid = spa_load_guid(spa); | |
6326 | ||
9b67f605 MA |
6327 | ASSERT(!BP_IS_EMBEDDED(bp)); |
6328 | ||
6329 | hdr = buf_hash_find(guid, bp, &hash_lock); | |
df4474f9 MA |
6330 | if (hdr == NULL) |
6331 | return; | |
df4474f9 | 6332 | |
d3c2ae1c GW |
6333 | /* |
6334 | * We might be trying to free a block that is still doing I/O | |
6335 | * (i.e. prefetch) or has a reference (i.e. a dedup-ed, | |
6336 | * dmu_sync-ed block). If this block is being prefetched, then it | |
6337 | * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr | |
6338 | * until the I/O completes. A block may also have a reference if it is | |
6339 | * part of a dedup-ed, dmu_sync-ed write. The dmu_sync() function would
6340 | * have written the new block to its final resting place on disk but | |
6341 | * without the dedup flag set. This would have left the hdr in the MRU | |
6342 | * state and discoverable. When the txg finally syncs it detects that | |
6343 | * the block was overridden in open context and issues an override I/O. | |
6344 | * Since this is a dedup block, the override I/O will determine if the | |
6345 | * block is already in the DDT. If so, then it will replace the io_bp | |
6346 | * with the bp from the DDT and allow the I/O to finish. When the I/O | |
6347 | * reaches the done callback, dbuf_write_override_done, it will | |
6348 | * check to see if the io_bp and io_bp_override are identical. | |
6349 | * If they are not, then it indicates that the bp was replaced with | |
6350 | * the bp in the DDT and the override bp is freed. This allows | |
6351 | * us to arrive here with a reference on a block that is being | |
6352 | * freed. So if we have an I/O in progress, or a reference to | |
6353 | * this hdr, then we don't destroy the hdr. | |
6354 | */ | |
6355 | if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) && | |
6356 | refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) { | |
6357 | arc_change_state(arc_anon, hdr, hash_lock); | |
6358 | arc_hdr_destroy(hdr); | |
df4474f9 | 6359 | mutex_exit(hash_lock); |
bd089c54 | 6360 | } else { |
d3c2ae1c | 6361 | mutex_exit(hash_lock); |
34dc7c2f | 6362 | } |
34dc7c2f | 6363 | |
34dc7c2f BB |
6364 | } |
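/*
 * Condensed timeline of the dedup case described above:
 *  1. dmu_sync() writes the block and leaves the hdr in the MRU
 *     state, discoverable, without the dedup flag set.
 *  2. At txg sync, the override I/O finds the block already in the
 *     DDT, substitutes the DDT bp, and frees the override bp.
 *  3. arc_freed() then runs against a hdr that still has a reference
 *     (or an in-flight I/O), so the hdr is deliberately left intact.
 */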
6365 | ||
6366 | /* | |
e49f1e20 WA |
6367 | * Release this buffer from the cache, making it an anonymous buffer. This |
6368 | * must be done after a read and prior to modifying the buffer contents. | |
34dc7c2f | 6369 | * If the buffer has more than one reference, we must make |
b128c09f | 6370 | * a new hdr for the buffer. |
34dc7c2f BB |
6371 | */ |
6372 | void | |
6373 | arc_release(arc_buf_t *buf, void *tag) | |
6374 | { | |
b9541d6b CW |
6375 | kmutex_t *hash_lock; |
6376 | arc_state_t *state; | |
6377 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
34dc7c2f | 6378 | |
428870ff | 6379 | /* |
ca0bf58d | 6380 | * It would be nice to assert that if it's DMU metadata (level >
428870ff BB |
6381 | * 0 || it's the dnode file), then it must be syncing context. |
6382 | * But we don't know that information at this level. | |
6383 | */ | |
6384 | ||
6385 | mutex_enter(&buf->b_evict_lock); | |
b128c09f | 6386 | |
ca0bf58d PS |
6387 | ASSERT(HDR_HAS_L1HDR(hdr)); |
6388 | ||
b9541d6b CW |
6389 | /* |
6390 | * We don't grab the hash lock prior to this check, because if | |
6391 | * the buffer's header is in the arc_anon state, it won't be | |
6392 | * linked into the hash table. | |
6393 | */ | |
6394 | if (hdr->b_l1hdr.b_state == arc_anon) { | |
6395 | mutex_exit(&buf->b_evict_lock); | |
6396 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
6397 | ASSERT(!HDR_IN_HASH_TABLE(hdr)); | |
6398 | ASSERT(!HDR_HAS_L2HDR(hdr)); | |
d3c2ae1c | 6399 | ASSERT(HDR_EMPTY(hdr)); |
34dc7c2f | 6400 | |
d3c2ae1c | 6401 | ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); |
b9541d6b CW |
6402 | ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); |
6403 | ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); | |
6404 | ||
b9541d6b | 6405 | hdr->b_l1hdr.b_arc_access = 0; |
d3c2ae1c GW |
6406 | |
6407 | /* | |
6408 | * If the buf is being overridden then it may already | |
6409 | * have a hdr that is not empty. | |
6410 | */ | |
6411 | buf_discard_identity(hdr); | |
b9541d6b CW |
6412 | arc_buf_thaw(buf); |
6413 | ||
6414 | return; | |
34dc7c2f BB |
6415 | } |
6416 | ||
b9541d6b CW |
6417 | hash_lock = HDR_LOCK(hdr); |
6418 | mutex_enter(hash_lock); | |
6419 | ||
6420 | /* | |
6421 | * This assignment is only valid as long as the hash_lock is | |
6422 | * held, we must be careful not to reference state or the | |
6423 | * b_state field after dropping the lock. | |
6424 | */ | |
6425 | state = hdr->b_l1hdr.b_state; | |
6426 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); | |
6427 | ASSERT3P(state, !=, arc_anon); | |
6428 | ||
6429 | /* this buffer is not on any list */ | |
2aa34383 | 6430 | ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0); |
b9541d6b CW |
6431 | |
6432 | if (HDR_HAS_L2HDR(hdr)) { | |
b9541d6b | 6433 | mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); |
ca0bf58d PS |
6434 | |
6435 | /* | |
d962d5da PS |
6436 | * We have to recheck this conditional again now that |
6437 | * we're holding the l2ad_mtx to prevent a race with | |
6438 | * another thread which might be concurrently calling | |
6439 | * l2arc_evict(). In that case, l2arc_evict() might have | |
6440 | * destroyed the header's L2 portion as we were waiting | |
6441 | * to acquire the l2ad_mtx. | |
ca0bf58d | 6442 | */ |
d962d5da PS |
6443 | if (HDR_HAS_L2HDR(hdr)) |
6444 | arc_hdr_l2hdr_destroy(hdr); | |
ca0bf58d | 6445 | |
b9541d6b | 6446 | mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); |
b128c09f BB |
6447 | } |
6448 | ||
34dc7c2f BB |
6449 | /* |
6450 | * Do we have more than one buf? | |
6451 | */ | |
d3c2ae1c | 6452 | if (hdr->b_l1hdr.b_bufcnt > 1) { |
34dc7c2f | 6453 | arc_buf_hdr_t *nhdr; |
d164b209 | 6454 | uint64_t spa = hdr->b_spa; |
d3c2ae1c GW |
6455 | uint64_t psize = HDR_GET_PSIZE(hdr); |
6456 | uint64_t lsize = HDR_GET_LSIZE(hdr); | |
b5256303 TC |
6457 | boolean_t protected = HDR_PROTECTED(hdr); |
6458 | enum zio_compress compress = arc_hdr_get_compress(hdr); | |
b9541d6b | 6459 | arc_buf_contents_t type = arc_buf_type(hdr); |
d3c2ae1c | 6460 | VERIFY3U(hdr->b_type, ==, type); |
34dc7c2f | 6461 | |
b9541d6b | 6462 | ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); |
d3c2ae1c GW |
6463 | (void) remove_reference(hdr, hash_lock, tag); |
6464 | ||
524b4217 | 6465 | if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) { |
d3c2ae1c | 6466 | ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); |
524b4217 DK |
6467 | ASSERT(ARC_BUF_LAST(buf)); |
6468 | } | |
d3c2ae1c | 6469 | |
34dc7c2f | 6470 | /* |
428870ff | 6471 | * Pull the data off of this hdr and attach it to |
d3c2ae1c GW |
6472 | * a new anonymous hdr. Also find the last buffer |
6473 | * in the hdr's buffer list. | |
34dc7c2f | 6474 | */ |
a7004725 | 6475 | arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); |
d3c2ae1c | 6476 | ASSERT3P(lastbuf, !=, NULL); |
34dc7c2f | 6477 | |
d3c2ae1c GW |
6478 | /* |
6479 | * If the current arc_buf_t and the hdr are sharing their data | |
524b4217 | 6480 | * buffer, then we must stop sharing that block. |
d3c2ae1c GW |
6481 | */ |
6482 | if (arc_buf_is_shared(buf)) { | |
6483 | ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); | |
d3c2ae1c GW |
6484 | VERIFY(!arc_buf_is_shared(lastbuf)); |
6485 | ||
6486 | /* | |
6487 | * First, sever the block sharing relationship between | |
a7004725 | 6488 | * buf and the arc_buf_hdr_t. |
d3c2ae1c GW |
6489 | */ |
6490 | arc_unshare_buf(hdr, buf); | |
2aa34383 DK |
6491 | |
6492 | /* | |
a6255b7f | 6493 | * Now we need to recreate the hdr's b_pabd. Since we |
524b4217 | 6494 | * have lastbuf handy, we try to share with it, but if |
a6255b7f | 6495 | * we can't then we allocate a new b_pabd and copy the |
524b4217 | 6496 | * data from buf into it. |
2aa34383 | 6497 | */ |
524b4217 DK |
6498 | if (arc_can_share(hdr, lastbuf)) { |
6499 | arc_share_buf(hdr, lastbuf); | |
6500 | } else { | |
b5256303 | 6501 | arc_hdr_alloc_abd(hdr, B_FALSE); |
a6255b7f DQ |
6502 | abd_copy_from_buf(hdr->b_l1hdr.b_pabd, |
6503 | buf->b_data, psize); | |
2aa34383 | 6504 | } |
d3c2ae1c GW |
6505 | VERIFY3P(lastbuf->b_data, !=, NULL); |
6506 | } else if (HDR_SHARED_DATA(hdr)) { | |
2aa34383 DK |
6507 | /* |
6508 | * Uncompressed shared buffers are always at the end | |
6509 | * of the list. Compressed buffers don't have the | |
6510 | * same requirements. This makes it hard to | |
6511 | * simply assert that the lastbuf is shared so | |
6512 | * we rely on the hdr's compression flags to determine | |
6513 | * if we have a compressed, shared buffer. | |
6514 | */ | |
6515 | ASSERT(arc_buf_is_shared(lastbuf) || | |
b5256303 | 6516 | arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF); |
2aa34383 | 6517 | ASSERT(!ARC_BUF_SHARED(buf)); |
d3c2ae1c | 6518 | } |
b5256303 TC |
6519 | |
6520 | ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr)); | |
b9541d6b | 6521 | ASSERT3P(state, !=, arc_l2c_only); |
36da08ef | 6522 | |
d3c2ae1c | 6523 | (void) refcount_remove_many(&state->arcs_size, |
2aa34383 | 6524 | arc_buf_size(buf), buf); |
36da08ef | 6525 | |
b9541d6b | 6526 | if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { |
b9541d6b | 6527 | ASSERT3P(state, !=, arc_l2c_only); |
d3c2ae1c | 6528 | (void) refcount_remove_many(&state->arcs_esize[type], |
2aa34383 | 6529 | arc_buf_size(buf), buf); |
34dc7c2f | 6530 | } |
1eb5bfa3 | 6531 | |
d3c2ae1c | 6532 | hdr->b_l1hdr.b_bufcnt -= 1; |
b5256303 TC |
6533 | if (ARC_BUF_ENCRYPTED(buf)) |
6534 | hdr->b_crypt_hdr.b_ebufcnt -= 1; | |
6535 | ||
34dc7c2f | 6536 | arc_cksum_verify(buf); |
498877ba | 6537 | arc_buf_unwatch(buf); |
34dc7c2f | 6538 | |
f486f584 TC |
6539 | /* if this is the last uncompressed buf free the checksum */ |
6540 | if (!arc_hdr_has_uncompressed_buf(hdr)) | |
6541 | arc_cksum_free(hdr); | |
6542 | ||
34dc7c2f BB |
6543 | mutex_exit(hash_lock); |
6544 | ||
d3c2ae1c | 6545 | /* |
a6255b7f | 6546 | * Allocate a new hdr. The new hdr will contain a b_pabd |
d3c2ae1c GW |
6547 | * buffer which will be freed in arc_write(). |
6548 | */ | |
b5256303 TC |
6549 | nhdr = arc_hdr_alloc(spa, psize, lsize, protected, |
6550 | compress, type, HDR_HAS_RABD(hdr)); | |
d3c2ae1c GW |
6551 | ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); |
6552 | ASSERT0(nhdr->b_l1hdr.b_bufcnt); | |
6553 | ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt)); | |
6554 | VERIFY3U(nhdr->b_type, ==, type); | |
6555 | ASSERT(!HDR_SHARED_DATA(nhdr)); | |
b9541d6b | 6556 | |
d3c2ae1c GW |
6557 | nhdr->b_l1hdr.b_buf = buf; |
6558 | nhdr->b_l1hdr.b_bufcnt = 1; | |
b5256303 TC |
6559 | if (ARC_BUF_ENCRYPTED(buf)) |
6560 | nhdr->b_crypt_hdr.b_ebufcnt = 1; | |
b9541d6b CW |
6561 | nhdr->b_l1hdr.b_mru_hits = 0; |
6562 | nhdr->b_l1hdr.b_mru_ghost_hits = 0; | |
6563 | nhdr->b_l1hdr.b_mfu_hits = 0; | |
6564 | nhdr->b_l1hdr.b_mfu_ghost_hits = 0; | |
6565 | nhdr->b_l1hdr.b_l2_hits = 0; | |
b9541d6b | 6566 | (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); |
34dc7c2f | 6567 | buf->b_hdr = nhdr; |
d3c2ae1c | 6568 | |
428870ff | 6569 | mutex_exit(&buf->b_evict_lock); |
d3c2ae1c GW |
6570 | (void) refcount_add_many(&arc_anon->arcs_size, |
6571 | HDR_GET_LSIZE(nhdr), buf); | |
34dc7c2f | 6572 | } else { |
428870ff | 6573 | mutex_exit(&buf->b_evict_lock); |
b9541d6b | 6574 | ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); |
ca0bf58d PS |
6575 | /* protected by hash lock, or hdr is on arc_anon */ |
6576 | ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); | |
34dc7c2f | 6577 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); |
b9541d6b CW |
6578 | hdr->b_l1hdr.b_mru_hits = 0; |
6579 | hdr->b_l1hdr.b_mru_ghost_hits = 0; | |
6580 | hdr->b_l1hdr.b_mfu_hits = 0; | |
6581 | hdr->b_l1hdr.b_mfu_ghost_hits = 0; | |
6582 | hdr->b_l1hdr.b_l2_hits = 0; | |
6583 | arc_change_state(arc_anon, hdr, hash_lock); | |
6584 | hdr->b_l1hdr.b_arc_access = 0; | |
34dc7c2f | 6585 | |
b5256303 | 6586 | mutex_exit(hash_lock); |
428870ff | 6587 | buf_discard_identity(hdr); |
34dc7c2f BB |
6588 | arc_buf_thaw(buf); |
6589 | } | |
34dc7c2f BB |
6590 | } |

int
arc_released(arc_buf_t *buf)
{
	int released;

	mutex_enter(&buf->b_evict_lock);
	released = (buf->b_data != NULL &&
	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
	mutex_exit(&buf->b_evict_lock);
	return (released);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	mutex_enter(&buf->b_evict_lock);
	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
	mutex_exit(&buf->b_evict_lock);
	return (referenced);
}
#endif

static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;
	blkptr_t *bp = zio->io_bp;
	uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
	enum zio_compress compress;
	fstrans_cookie_t cookie = spl_fstrans_mark();

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
	ASSERT(hdr->b_l1hdr.b_bufcnt > 0);

	/*
	 * If we're reexecuting this zio because the pool suspended, then
	 * clean up any state that was previously set the first time the
	 * callback was invoked.
	 */
	if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
		arc_cksum_free(hdr);
		arc_buf_unwatch(buf);
		if (hdr->b_l1hdr.b_pabd != NULL) {
			if (arc_buf_is_shared(buf)) {
				arc_unshare_buf(hdr, buf);
			} else {
				arc_hdr_free_abd(hdr, B_FALSE);
			}
		}

		if (HDR_HAS_RABD(hdr))
			arc_hdr_free_abd(hdr, B_TRUE);
	}
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
	ASSERT(!HDR_HAS_RABD(hdr));
	ASSERT(!HDR_SHARED_DATA(hdr));
	ASSERT(!arc_buf_is_shared(buf));

	callback->awcb_ready(zio, buf, callback->awcb_private);

	if (HDR_IO_IN_PROGRESS(hdr))
		ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);

	arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);

	if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
		hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));

	if (BP_IS_PROTECTED(bp)) {
		/* ZIL blocks are written through zio_rewrite */
		ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
		ASSERT(HDR_PROTECTED(hdr));

		hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
		hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
		zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
		    hdr->b_crypt_hdr.b_iv);
		zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
	}

	/*
	 * If this block was written for raw encryption but the zio layer
	 * ended up only authenticating it, adjust the buffer flags now.
	 */
	if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
		arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
		buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
		if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
			buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
	}

	/* this must be done after the buffer flags are adjusted */
	arc_cksum_compute(buf);

	if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
		compress = ZIO_COMPRESS_OFF;
	} else {
		ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
		compress = BP_GET_COMPRESS(bp);
	}
	HDR_SET_PSIZE(hdr, psize);
	arc_hdr_set_compress(hdr, compress);

	if (zio->io_error != 0 || psize == 0)
		goto out;

	/*
	 * Fill the hdr with data. If the buffer is encrypted we have no choice
	 * but to copy the data into b_rabd. If the hdr is compressed, the data
	 * we want is available from the zio, otherwise we can take it from
	 * the buf.
	 *
	 * We might be able to share the buf's data with the hdr here. However,
	 * doing so would cause the ARC to be full of linear ABDs if we write a
	 * lot of shareable data. As a compromise, we check whether scattered
	 * ABDs are allowed, and assume that if they are then the user wants
	 * the ARC to be primarily filled with them regardless of the data being
	 * written. Therefore, if they're allowed then we allocate one and copy
	 * the data into it; otherwise, we share the data directly if we can.
	 */
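	/*
	 * In short, the cases below play out as follows: an encrypted buf
	 * always has the zio's data copied into b_rabd; when scatter ABDs
	 * are enabled or the buf cannot be shared, a fresh b_pabd (or
	 * b_rabd) is allocated and filled by copy; only in the remaining
	 * case is the buf's data shared directly with the hdr.
	 */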
	if (ARC_BUF_ENCRYPTED(buf)) {
		ASSERT3U(psize, >, 0);
		ASSERT(ARC_BUF_COMPRESSED(buf));
		arc_hdr_alloc_abd(hdr, B_TRUE);
		abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
	} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
		/*
		 * Ideally, we would always copy the io_abd into b_pabd, but
		 * the user may have disabled compressed ARC, thus we must
		 * check the hdr's compression setting rather than the
		 * io_bp's.
		 */
		if (BP_IS_ENCRYPTED(bp)) {
			ASSERT3U(psize, >, 0);
			arc_hdr_alloc_abd(hdr, B_TRUE);
			abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
		} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
		    !ARC_BUF_COMPRESSED(buf)) {
			ASSERT3U(psize, >, 0);
			arc_hdr_alloc_abd(hdr, B_FALSE);
			abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
		} else {
			ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
			arc_hdr_alloc_abd(hdr, B_FALSE);
			abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
			    arc_buf_size(buf));
		}
	} else {
		ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
		ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
		ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);

		arc_share_buf(hdr, buf);
	}

out:
	arc_hdr_verify(hdr, bp);
	spl_fstrans_unmark(cookie);
}

static void
arc_write_children_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;

	callback->awcb_children_ready(zio, buf, callback->awcb_private);
}

/*
 * The SPA calls this callback for each physical write that happens on behalf
 * of a logical write. See the comment in dbuf_write_physdone() for details.
 */
static void
arc_write_physdone(zio_t *zio)
{
	arc_write_callback_t *cb = zio->io_private;
	if (cb->awcb_physdone != NULL)
		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}

static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);

	if (zio->io_error == 0) {
		arc_hdr_verify(hdr, zio->io_bp);

		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
			buf_discard_identity(hdr);
		} else {
			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
		}
	} else {
		ASSERT(HDR_EMPTY(hdr));
	}

	/*
	 * If the block to be written was all-zero or compressed enough to be
	 * embedded in the BP, no write was performed so there will be no
	 * dva/birth/checksum. The buffer must therefore remain anonymous
	 * (and uncached).
	 */
	if (!HDR_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		ASSERT3U(zio->io_error, ==, 0);

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists != NULL) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad overwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
				ASSERT(refcount_is_zero(
				    &exists->b_l1hdr.b_refcnt));
				arc_change_state(arc_anon, exists, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(exists);
				exists = buf_hash_insert(hdr, &hash_lock);
				ASSERT3P(exists, ==, NULL);
			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
				/* nopwrite */
				ASSERT(zio->io_prop.zp_nopwrite);
				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
					panic("bad nopwrite, hdr=%p exists=%p",
					    (void *)hdr, (void *)exists);
			} else {
				/* Dedup */
				ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
				ASSERT(hdr->b_l1hdr.b_state == arc_anon);
				ASSERT(BP_GET_DEDUP(zio->io_bp));
				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
			}
		}
		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
		/* if it's not anon, we are doing a scrub */
		if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
	}

	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
	callback->awcb_done(zio, buf, callback->awcb_private);

	abd_put(zio->io_abd);
	kmem_free(callback, sizeof (arc_write_callback_t));
}

zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
    const zio_prop_t *zp, arc_write_done_func_t *ready,
    arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
    arc_write_done_func_t *done, void *private, zio_priority_t priority,
    int zio_flags, const zbookmark_phys_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t *zio;
	zio_prop_t localprop = *zp;

	ASSERT3P(ready, !=, NULL);
	ASSERT3P(done, !=, NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
	ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
	if (l2arc)
		arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);

	if (ARC_BUF_ENCRYPTED(buf)) {
		ASSERT(ARC_BUF_COMPRESSED(buf));
		localprop.zp_encrypt = B_TRUE;
		localprop.zp_compress = HDR_GET_COMPRESS(hdr);
		localprop.zp_byteorder =
		    (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
		    ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
		bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
		    ZIO_DATA_SALT_LEN);
		bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
		    ZIO_DATA_IV_LEN);
		bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
		    ZIO_DATA_MAC_LEN);
		if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
			localprop.zp_nopwrite = B_FALSE;
			localprop.zp_copies =
			    MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
		}
		zio_flags |= ZIO_FLAG_RAW;
	} else if (ARC_BUF_COMPRESSED(buf)) {
		ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
		localprop.zp_compress = HDR_GET_COMPRESS(hdr);
		zio_flags |= ZIO_FLAG_RAW_COMPRESS;
	}
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_children_ready = children_ready;
	callback->awcb_physdone = physdone;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	/*
	 * The hdr's b_pabd is now stale, free it now. A new data block
	 * will be allocated when the zio pipeline calls arc_write_ready().
	 */
	if (hdr->b_l1hdr.b_pabd != NULL) {
		/*
		 * If the buf is currently sharing the data block with
		 * the hdr then we need to break that relationship here.
		 * The hdr will remain with a NULL data pointer and the
		 * buf will take sole ownership of the block.
		 */
		if (arc_buf_is_shared(buf)) {
			arc_unshare_buf(hdr, buf);
		} else {
			arc_hdr_free_abd(hdr, B_FALSE);
		}
		VERIFY3P(buf->b_data, !=, NULL);
	}

	if (HDR_HAS_RABD(hdr))
		arc_hdr_free_abd(hdr, B_TRUE);

	arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);

	ASSERT(!arc_buf_is_shared(buf));
	ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);

	zio = zio_write(pio, spa, txg, bp,
	    abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
	    HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
	    (children_ready != NULL) ? arc_write_children_ready : NULL,
	    arc_write_physdone, arc_write_done, callback,
	    priority, zio_flags, zb);

	return (zio);
}
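
/*
 * A minimal caller sketch (hypothetical names, for illustration only):
 *
 *	zio_t *zio = arc_write(pio, spa, txg, bp, buf, B_TRUE, &zp,
 *	    my_ready_cb, NULL, NULL, my_done_cb, my_arg,
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, &zb);
 *	zio_nowait(zio);
 *
 * where my_ready_cb and my_done_cb match arc_write_done_func_t, and my_arg
 * is handed back to them through the private pointer.
 */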

static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t available_memory = arc_free_memory();
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if defined(_ILP32)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif

	if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
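	/*
	 * For example, each call from kswapd below adds a deflated
	 * reserve / 8 to page_load; once page_load exceeds a quarter of
	 * MAX(arc_sys_free / 4, available_memory), the transaction is
	 * pushed back with ERESTART.
	 */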
	if (current_is_kswapd()) {
		if (page_load > MAX(arc_sys_free / 4, available_memory) / 4) {
			DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
			return (SET_ERROR(ERESTART));
		}
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
		return (SET_ERROR(EAGAIN));
	}
	page_load = 0;
#endif
	return (0);
}

void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;
	uint64_t anon_size;

	if (!arc_no_grow &&
	    reserve > arc_c / 4 &&
	    reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
		arc_c = MIN(arc_c_max, reserve * 4);

	/*
	 * Throttle when the calculated memory footprint for the TXG
	 * exceeds the target ARC size.
	 */
	if (reserve > arc_c) {
		DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
		return (SET_ERROR(ERESTART));
	}

	/*
	 * Don't count loaned bufs as in flight dirty data to prevent long
	 * network delays from blocking transactions that are ready to be
	 * assigned to a txg.
	 */

	/* assert that it has not wrapped around */
	ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);

	anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
	    arc_loaned_bytes), 0);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data. We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	error = arc_memory_throttle(reserve, txg);
	if (error != 0)
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large. We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail. Not a huge deal.
	 */
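	/*
	 * For example, with arc_c = 1 GiB the check below trips once
	 * reserve + arc_tempreserve + anon_size exceeds 512 MiB while
	 * anon_size alone exceeds 256 MiB.
	 */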
	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
	    anon_size > arc_c / 4) {
		uint64_t meta_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
		uint64_t data_esize =
		    refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve >> 10, meta_esize >> 10,
		    data_esize >> 10, reserve >> 10, arc_c >> 10);
		DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
		return (SET_ERROR(ERESTART));
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}

static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
	size->value.ui64 = refcount_count(&state->arcs_size);
	evict_data->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
	evict_metadata->value.ui64 =
	    refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}

static int
arc_kstat_update(kstat_t *ksp, int rw)
{
	arc_stats_t *as = ksp->ks_data;

	if (rw == KSTAT_WRITE) {
		return (SET_ERROR(EACCES));
	} else {
		arc_kstat_update_state(arc_anon,
		    &as->arcstat_anon_size,
		    &as->arcstat_anon_evictable_data,
		    &as->arcstat_anon_evictable_metadata);
		arc_kstat_update_state(arc_mru,
		    &as->arcstat_mru_size,
		    &as->arcstat_mru_evictable_data,
		    &as->arcstat_mru_evictable_metadata);
		arc_kstat_update_state(arc_mru_ghost,
		    &as->arcstat_mru_ghost_size,
		    &as->arcstat_mru_ghost_evictable_data,
		    &as->arcstat_mru_ghost_evictable_metadata);
		arc_kstat_update_state(arc_mfu,
		    &as->arcstat_mfu_size,
		    &as->arcstat_mfu_evictable_data,
		    &as->arcstat_mfu_evictable_metadata);
		arc_kstat_update_state(arc_mfu_ghost,
		    &as->arcstat_mfu_ghost_size,
		    &as->arcstat_mfu_ghost_evictable_data,
		    &as->arcstat_mfu_ghost_evictable_metadata);

		as->arcstat_memory_all_bytes.value.ui64 =
		    arc_all_memory();
		as->arcstat_memory_free_bytes.value.ui64 =
		    arc_free_memory();
		as->arcstat_memory_available_bytes.value.i64 =
		    arc_available_memory();
	}

	return (0);
}

/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the ARC eviction
 * code is laid out; arc_evict_state() assumes ARC buffers are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
	arc_buf_hdr_t *hdr = obj;

	/*
	 * We rely on b_dva to generate evenly distributed index
	 * numbers using buf_hash below. So, as an added precaution,
	 * let's make sure we never add empty buffers to the arc lists.
	 */
	ASSERT(!HDR_EMPTY(hdr));

	/*
	 * The assumption here is that the hash value for a given
	 * arc_buf_hdr_t will remain constant throughout its lifetime
	 * (i.e. its b_spa, b_dva, and b_birth fields don't change).
	 * Thus, we don't need to store the header's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
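	/*
	 * E.g. with 8 sublists, a header whose buf_hash() value is 4660
	 * always maps to sublist 4 (4660 % 8), both when it is inserted
	 * and when it is later removed.
	 */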
	return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
	    multilist_get_num_sublists(ml));
}

/*
 * Called during module initialization and periodically thereafter to
 * apply reasonable changes to the exposed performance tunings. Non-zero
 * zfs_* values which differ from the currently set values will be applied.
 */
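/*
 * For example, setting the zfs_arc_max module parameter to 4294967296
 * (4 GiB) on a system where that passes the validity checks below causes
 * the next call here to set arc_c_max to 4 GiB, reset arc_c and arc_p,
 * and clamp arc_meta_limit and arc_dnode_limit down if they now exceed it.
 */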
static void
arc_tuning_update(void)
{
	uint64_t allmem = arc_all_memory();
	unsigned long limit;

	/* Valid range: 64M - <all physical memory> */
	if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
	    (zfs_arc_max > 64 << 20) && (zfs_arc_max < allmem) &&
	    (zfs_arc_max > arc_c_min)) {
		arc_c_max = zfs_arc_max;
		arc_c = arc_c_max;
		arc_p = (arc_c >> 1);
		if (arc_meta_limit > arc_c_max)
			arc_meta_limit = arc_c_max;
		if (arc_dnode_limit > arc_meta_limit)
			arc_dnode_limit = arc_meta_limit;
	}

	/* Valid range: 32M - <arc_c_max> */
	if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
	    (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
	    (zfs_arc_min <= arc_c_max)) {
		arc_c_min = zfs_arc_min;
		arc_c = MAX(arc_c, arc_c_min);
	}

	/* Valid range: 16M - <arc_c_max> */
	if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
	    (zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
	    (zfs_arc_meta_min <= arc_c_max)) {
		arc_meta_min = zfs_arc_meta_min;
		if (arc_meta_limit < arc_meta_min)
			arc_meta_limit = arc_meta_min;
		if (arc_dnode_limit < arc_meta_min)
			arc_dnode_limit = arc_meta_min;
	}

	/* Valid range: <arc_meta_min> - <arc_c_max> */
	limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
	    MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
	if ((limit != arc_meta_limit) &&
	    (limit >= arc_meta_min) &&
	    (limit <= arc_c_max))
		arc_meta_limit = limit;

	/* Valid range: <arc_meta_min> - <arc_meta_limit> */
	limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
	    MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
	if ((limit != arc_dnode_limit) &&
	    (limit >= arc_meta_min) &&
	    (limit <= arc_meta_limit))
		arc_dnode_limit = limit;

	/* Valid range: 1 - N */
	if (zfs_arc_grow_retry)
		arc_grow_retry = zfs_arc_grow_retry;

	/* Valid range: 1 - N */
	if (zfs_arc_shrink_shift) {
		arc_shrink_shift = zfs_arc_shrink_shift;
		arc_no_grow_shift = MIN(arc_no_grow_shift,
		    arc_shrink_shift - 1);
	}

	/* Valid range: 1 - N */
	if (zfs_arc_p_min_shift)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* Valid range: 1 - N ticks */
	if (zfs_arc_min_prefetch_lifespan)
		arc_min_prefetch_lifespan = zfs_arc_min_prefetch_lifespan;

	/* Valid range: 0 - 100 */
	if ((zfs_arc_lotsfree_percent >= 0) &&
	    (zfs_arc_lotsfree_percent <= 100))
		arc_lotsfree_percent = zfs_arc_lotsfree_percent;

	/* Valid range: 0 - <all physical memory> */
	if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
		arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
}

static void
arc_state_init(void)
{
	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;

	arc_mru->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);
	arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
	    multilist_create(sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
	    arc_state_multilist_index_func);

	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_create(&arc_anon->arcs_size);
	refcount_create(&arc_mru->arcs_size);
	refcount_create(&arc_mru_ghost->arcs_size);
	refcount_create(&arc_mfu->arcs_size);
	refcount_create(&arc_mfu_ghost->arcs_size);
	refcount_create(&arc_l2c_only->arcs_size);

	arc_anon->arcs_state = ARC_STATE_ANON;
	arc_mru->arcs_state = ARC_STATE_MRU;
	arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
	arc_mfu->arcs_state = ARC_STATE_MFU;
	arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
	arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}

static void
arc_state_fini(void)
{
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
	refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);

	refcount_destroy(&arc_anon->arcs_size);
	refcount_destroy(&arc_mru->arcs_size);
	refcount_destroy(&arc_mru_ghost->arcs_size);
	refcount_destroy(&arc_mfu->arcs_size);
	refcount_destroy(&arc_mfu_ghost->arcs_size);
	refcount_destroy(&arc_l2c_only->arcs_size);

	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
}

uint64_t
arc_target_bytes(void)
{
	return (arc_c);
}

void
arc_init(void)
{
	uint64_t percent, allmem = arc_all_memory();

	mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

#ifdef _KERNEL
	/*
	 * Register a shrinker to support synchronous (direct) memory
	 * reclaim from the arc. This is done to prevent kswapd from
	 * swapping out pages when it is preferable to shrink the arc.
	 */
	spl_register_shrinker(&arc_shrinker);

	/* Set to 1/64 of all memory or a minimum of 512K */
	arc_sys_free = MAX(allmem / 64, (512 * 1024));
	arc_need_free = 0;
#endif

	/* Set max to 1/2 of all memory */
	arc_c_max = allmem / 2;

#ifdef _KERNEL
	/* Set min cache to 1/32 of all memory, or 32MB, whichever is more */
	arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
#else
	/*
	 * In userland, there's only the memory pressure that we artificially
	 * create (see arc_available_memory()). Don't let arc_c get too
	 * small, because it can cause transactions to be larger than
	 * arc_c, causing arc_tempreserve_space() to fail.
	 */
	arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);
	arc_size = 0;

	/* Set min to 1/2 of arc_c_min */
	arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
	/* Initialize maximum observed usage to zero */
	arc_meta_max = 0;
	/*
	 * Set arc_meta_limit to a percent of arc_c_max with a floor of
	 * arc_meta_min, and a ceiling of arc_c_max.
	 */
	percent = MIN(zfs_arc_meta_limit_percent, 100);
	arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
	percent = MIN(zfs_arc_dnode_limit_percent, 100);
	arc_dnode_limit = (percent * arc_meta_limit) / 100;

	/* Apply user specified tunings */
	arc_tuning_update();

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	arc_state_init();
	buf_init();

	list_create(&arc_prune_list, sizeof (arc_prune_t),
	    offsetof(arc_prune_t, p_node));
	mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);

	arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri,
	    max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	arc_reclaim_thread_exit = B_FALSE;

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, defclsyspri);

	arc_dead = B_FALSE;
	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by a module parameter, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4G or 25% of physical memory).
	 */
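	/*
	 * For example, on a 16 GiB system with the defaults above, the cap
	 * is MIN(4 GiB, 16 GiB * 25%) = 4 GiB and zfs_dirty_data_max ends
	 * up as 16 GiB * 10% = 1.6 GiB.
	 */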
	if (zfs_dirty_data_max_max == 0)
		zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
		    allmem * zfs_dirty_data_max_max_percent / 100);

	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = allmem *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}
}

void
arc_fini(void)
{
	arc_prune_t *p;

#ifdef _KERNEL
	spl_unregister_shrinker(&arc_shrinker);
#endif /* _KERNEL */

	mutex_enter(&arc_reclaim_lock);
	arc_reclaim_thread_exit = B_TRUE;
	/*
	 * The reclaim thread will set arc_reclaim_thread_exit back to
	 * B_FALSE when it is finished exiting; we're waiting for that.
	 */
	while (arc_reclaim_thread_exit) {
		cv_signal(&arc_reclaim_thread_cv);
		cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
	}
	mutex_exit(&arc_reclaim_lock);

	/* Use B_TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, B_TRUE);

	arc_dead = B_TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	taskq_wait(arc_prune_taskq);
	taskq_destroy(arc_prune_taskq);

	mutex_enter(&arc_prune_mtx);
	while ((p = list_head(&arc_prune_list)) != NULL) {
		list_remove(&arc_prune_list, p);
		refcount_remove(&p->p_refcnt, &arc_prune_list);
		refcount_destroy(&p->p_refcnt);
		kmem_free(p, sizeof (*p));
	}
	mutex_exit(&arc_prune_mtx);

	list_destroy(&arc_prune_list);
	mutex_destroy(&arc_prune_mtx);
	mutex_destroy(&arc_reclaim_lock);
	cv_destroy(&arc_reclaim_thread_cv);
	cv_destroy(&arc_reclaim_waiters_cv);

	arc_state_fini();
	buf_fini();

	ASSERT0(arc_loaned_bytes);
}

/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes. The main role of this cache is to boost
 * the performance of random read workloads. The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists. The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there. It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction. If a compressible buffer is
 * found during scanning and selected for writing to an L2ARC device, we
 * temporarily boost scanning headroom during the next scan cycle to make
 * sure we adapt to compression effects (which might significantly reduce
 * the data volume we write to L2ARC). The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         V
 *	          +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *	                     32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers. This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes. This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static. Instead of searching from tail of these
 * lists as pictured, the l2arc_feed_thread() will search from the list heads
 * for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes. Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content. It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
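
/*
 * To make the scan/write mechanics above concrete (illustrative numbers
 * only, assuming the usual default tunables): with l2arc_write_max = 8 MB
 * and l2arc_headroom = 2, each feed cycle examines roughly 16 MB from the
 * tail of each list and writes at most 8 MB; while arc_warm is false the
 * limit is raised by l2arc_write_boost, and finding compressed buffers
 * scales the next cycle's headroom by l2arc_headroom_boost percent.
 */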

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back. This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
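	/*
	 * E.g. with l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200 (the
	 * usual defaults), a busy pass that wrote more than half of what
	 * was wanted schedules the next write 200ms out instead of a
	 * full second.
	 */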
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}

/*
 * Cycle through L2ARC devices. This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT3P(df->l2df_abd, !=, NULL);
		abd_free(df->l2df_abd);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}
34dc7c2f BB |
7825 | /* |
7826 | * A write to a cache device has completed. Update all headers to allow | |
7827 | * reads from these buffers to begin. | |
7828 | */ | |
7829 | static void | |
7830 | l2arc_write_done(zio_t *zio) | |
7831 | { | |
7832 | l2arc_write_callback_t *cb; | |
7833 | l2arc_dev_t *dev; | |
7834 | list_t *buflist; | |
2a432414 | 7835 | arc_buf_hdr_t *head, *hdr, *hdr_prev; |
34dc7c2f | 7836 | kmutex_t *hash_lock; |
3bec585e | 7837 | int64_t bytes_dropped = 0; |
34dc7c2f BB |
7838 | |
7839 | cb = zio->io_private; | |
d3c2ae1c | 7840 | ASSERT3P(cb, !=, NULL); |
34dc7c2f | 7841 | dev = cb->l2wcb_dev; |
d3c2ae1c | 7842 | ASSERT3P(dev, !=, NULL); |
34dc7c2f | 7843 | head = cb->l2wcb_head; |
d3c2ae1c | 7844 | ASSERT3P(head, !=, NULL); |
b9541d6b | 7845 | buflist = &dev->l2ad_buflist; |
d3c2ae1c | 7846 | ASSERT3P(buflist, !=, NULL); |
34dc7c2f BB |
7847 | DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, |
7848 | l2arc_write_callback_t *, cb); | |
7849 | ||
7850 | if (zio->io_error != 0) | |
7851 | ARCSTAT_BUMP(arcstat_l2_writes_error); | |
7852 | ||
34dc7c2f BB |
7853 | /* |
7854 | * All writes completed, or an error was hit. | |
7855 | */ | |
ca0bf58d PS |
7856 | top: |
7857 | mutex_enter(&dev->l2ad_mtx); | |
2a432414 GW |
7858 | for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { |
7859 | hdr_prev = list_prev(buflist, hdr); | |
34dc7c2f | 7860 | |
2a432414 | 7861 | hash_lock = HDR_LOCK(hdr); |
ca0bf58d PS |
7862 | |
7863 | /* | |
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * in which the hash lock and l2ad_mtx are taken).
7867 | */ | |
34dc7c2f BB |
7868 | if (!mutex_tryenter(hash_lock)) { |
7869 | /* | |
ca0bf58d PS |
7870 | * Missed the hash lock. We must retry so we |
7871 | * don't leave the ARC_FLAG_L2_WRITING bit set. | |
34dc7c2f | 7872 | */ |
ca0bf58d PS |
7873 | ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); |
7874 | ||
7875 | /* | |
7876 | * We don't want to rescan the headers we've | |
7877 | * already marked as having been written out, so | |
7878 | * we reinsert the head node so we can pick up | |
7879 | * where we left off. | |
7880 | */ | |
7881 | list_remove(buflist, head); | |
7882 | list_insert_after(buflist, hdr, head); | |
7883 | ||
7884 | mutex_exit(&dev->l2ad_mtx); | |
7885 | ||
7886 | /* | |
7887 | * We wait for the hash lock to become available | |
7888 | * to try and prevent busy waiting, and increase | |
7889 | * the chance we'll be able to acquire the lock | |
7890 | * the next time around. | |
7891 | */ | |
7892 | mutex_enter(hash_lock); | |
7893 | mutex_exit(hash_lock); | |
7894 | goto top; | |
34dc7c2f BB |
7895 | } |
7896 | ||
b9541d6b | 7897 | /* |
ca0bf58d PS |
7898 | * We could not have been moved into the arc_l2c_only |
7899 | * state while in-flight due to our ARC_FLAG_L2_WRITING | |
7900 | * bit being set. Let's just ensure that's being enforced. | |
7901 | */ | |
7902 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
7903 | ||
8a09d5fd BB |
d3c2ae1c | 7908 | if (zio->io_error != 0) { |
34dc7c2f | 7909 | /* |
b128c09f | 7910 | * Error - drop L2ARC entry. |
34dc7c2f | 7911 | */ |
2a432414 | 7912 | list_remove(buflist, hdr); |
d3c2ae1c | 7913 | arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); |
b9541d6b | 7914 | |
01850391 AG |
7915 | ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr)); |
7916 | ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); | |
d962d5da | 7917 | |
d3c2ae1c | 7918 | bytes_dropped += arc_hdr_size(hdr); |
d962d5da | 7919 | (void) refcount_remove_many(&dev->l2ad_alloc, |
d3c2ae1c | 7920 | arc_hdr_size(hdr), hdr); |
34dc7c2f BB |
7921 | } |
7922 | ||
7923 | /* | |
ca0bf58d PS |
7924 | * Allow ARC to begin reads and ghost list evictions to |
7925 | * this L2ARC entry. | |
34dc7c2f | 7926 | */ |
d3c2ae1c | 7927 | arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); |
34dc7c2f BB |
7928 | |
7929 | mutex_exit(hash_lock); | |
7930 | } | |
7931 | ||
7932 | atomic_inc_64(&l2arc_writes_done); | |
7933 | list_remove(buflist, head); | |
b9541d6b CW |
7934 | ASSERT(!HDR_HAS_L1HDR(head)); |
7935 | kmem_cache_free(hdr_l2only_cache, head); | |
7936 | mutex_exit(&dev->l2ad_mtx); | |
34dc7c2f | 7937 | |
3bec585e SK |
7938 | vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); |
7939 | ||
b128c09f | 7940 | l2arc_do_free_on_write(); |
34dc7c2f BB |
7941 | |
7942 | kmem_free(cb, sizeof (l2arc_write_callback_t)); | |
7943 | } | |
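
/*
 * Note: the bytes_dropped total accumulated in the loop above is folded
 * back into the vdev's space accounting with a single negative
 * vdev_space_update() call after l2ad_mtx is dropped, rather than once
 * per header while the list lock is still held.
 */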
7944 | ||
b5256303 TC |
7945 | static int |
7946 | l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb) | |
7947 | { | |
7948 | int ret; | |
7949 | spa_t *spa = zio->io_spa; | |
7950 | arc_buf_hdr_t *hdr = cb->l2rcb_hdr; | |
7951 | blkptr_t *bp = zio->io_bp; | |
7952 | dsl_crypto_key_t *dck = NULL; | |
7953 | uint8_t salt[ZIO_DATA_SALT_LEN]; | |
7954 | uint8_t iv[ZIO_DATA_IV_LEN]; | |
7955 | uint8_t mac[ZIO_DATA_MAC_LEN]; | |
7956 | boolean_t no_crypt = B_FALSE; | |
7957 | ||
7958 | /* | |
	 * ZIL data is never written to the L2ARC, so we don't need
7960 | * special handling for its unique MAC storage. | |
7961 | */ | |
7962 | ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG); | |
7963 | ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); | |
7964 | ||
7965 | /* If the data was encrypted, decrypt it now */ | |
7966 | if (HDR_ENCRYPTED(hdr)) { | |
7967 | abd_t *eabd = arc_get_data_abd(hdr, | |
7968 | arc_hdr_size(hdr), hdr); | |
7969 | ||
7970 | zio_crypt_decode_params_bp(bp, salt, iv); | |
7971 | zio_crypt_decode_mac_bp(bp, mac); | |
7972 | ||
7973 | ret = spa_keystore_lookup_key(spa, | |
7974 | cb->l2rcb_zb.zb_objset, FTAG, &dck); | |
7975 | if (ret != 0) { | |
7976 | arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); | |
7977 | goto error; | |
7978 | } | |
7979 | ||
7980 | ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key, | |
7981 | salt, BP_GET_TYPE(bp), iv, mac, HDR_GET_PSIZE(hdr), | |
7982 | BP_SHOULD_BYTESWAP(bp), eabd, hdr->b_l1hdr.b_pabd, | |
7983 | &no_crypt); | |
7984 | if (ret != 0) { | |
7985 | arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); | |
7986 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
7987 | goto error; | |
7988 | } | |
7989 | ||
7990 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
7991 | ||
7992 | /* | |
7993 | * If we actually performed decryption, replace b_pabd | |
7994 | * with the decrypted data. Otherwise we can just throw | |
7995 | * our decryption buffer away. | |
7996 | */ | |
7997 | if (!no_crypt) { | |
7998 | arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, | |
7999 | arc_hdr_size(hdr), hdr); | |
8000 | hdr->b_l1hdr.b_pabd = eabd; | |
8001 | zio->io_abd = eabd; | |
8002 | } else { | |
8003 | arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr); | |
8004 | } | |
8005 | } | |
8006 | ||
8007 | /* | |
	 * If the L2ARC block was compressed but ARC compression
	 * is disabled, we decompress the data into a new buffer and
8010 | * replace the existing data. | |
8011 | */ | |
8012 | if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && | |
8013 | !HDR_COMPRESSION_ENABLED(hdr)) { | |
8014 | abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr); | |
8015 | void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr)); | |
8016 | ||
8017 | ret = zio_decompress_data(HDR_GET_COMPRESS(hdr), | |
8018 | hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr), | |
8019 | HDR_GET_LSIZE(hdr)); | |
8020 | if (ret != 0) { | |
8021 | abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); | |
8022 | arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr); | |
8023 | goto error; | |
8024 | } | |
8025 | ||
8026 | abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr)); | |
8027 | arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, | |
8028 | arc_hdr_size(hdr), hdr); | |
8029 | hdr->b_l1hdr.b_pabd = cabd; | |
8030 | zio->io_abd = cabd; | |
8031 | zio->io_size = HDR_GET_LSIZE(hdr); | |
8032 | } | |
8033 | ||
8034 | return (0); | |
8035 | ||
8036 | error: | |
8037 | return (ret); | |
8038 | } | |
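
/*
 * Note: l2arc_untransform() runs with the header's hash lock held (see
 * the MUTEX_HELD assertion above) and is invoked from l2arc_read_done()
 * below; a nonzero return does not fail the read outright, it simply
 * causes the block to be reissued to the primary pool storage.
 */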
8039 | ||
8040 | ||
34dc7c2f BB |
8041 | /* |
8042 | * A read to a cache device completed. Validate buffer contents before | |
8043 | * handing over to the regular ARC routines. | |
8044 | */ | |
8045 | static void | |
8046 | l2arc_read_done(zio_t *zio) | |
8047 | { | |
b5256303 | 8048 | int tfm_error = 0; |
34dc7c2f BB |
8049 | l2arc_read_callback_t *cb; |
8050 | arc_buf_hdr_t *hdr; | |
34dc7c2f | 8051 | kmutex_t *hash_lock; |
b5256303 | 8052 | boolean_t valid_cksum, using_rdata; |
b128c09f | 8053 | |
d3c2ae1c | 8054 | ASSERT3P(zio->io_vd, !=, NULL); |
b128c09f BB |
8055 | ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); |
8056 | ||
8057 | spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); | |
34dc7c2f BB |
8058 | |
8059 | cb = zio->io_private; | |
d3c2ae1c GW |
8060 | ASSERT3P(cb, !=, NULL); |
8061 | hdr = cb->l2rcb_hdr; | |
8062 | ASSERT3P(hdr, !=, NULL); | |
34dc7c2f | 8063 | |
d3c2ae1c | 8064 | hash_lock = HDR_LOCK(hdr); |
34dc7c2f | 8065 | mutex_enter(hash_lock); |
428870ff | 8066 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); |
34dc7c2f | 8067 | |
82710e99 GDN |
8068 | /* |
8069 | * If the data was read into a temporary buffer, | |
8070 | * move it and free the buffer. | |
8071 | */ | |
8072 | if (cb->l2rcb_abd != NULL) { | |
8073 | ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); | |
8074 | if (zio->io_error == 0) { | |
8075 | abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd, | |
8076 | arc_hdr_size(hdr)); | |
8077 | } | |
8078 | ||
8079 | /* | |
8080 | * The following must be done regardless of whether | |
8081 | * there was an error: | |
8082 | * - free the temporary buffer | |
8083 | * - point zio to the real ARC buffer | |
8084 | * - set zio size accordingly | |
8085 | * These are required because zio is either re-used for | |
8086 | * an I/O of the block in the case of the error | |
8087 | * or the zio is passed to arc_read_done() and it | |
8088 | * needs real data. | |
8089 | */ | |
8090 | abd_free(cb->l2rcb_abd); | |
8091 | zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); | |
8092 | zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; | |
8093 | } | |
8094 | ||
a6255b7f | 8095 | ASSERT3P(zio->io_abd, !=, NULL); |
3a17a7a9 | 8096 | |
34dc7c2f BB |
8097 | /* |
8098 | * Check this survived the L2ARC journey. | |
8099 | */ | |
b5256303 TC |
8100 | ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd || |
8101 | (HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd)); | |
d3c2ae1c GW |
8102 | zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ |
8103 | zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ | |
8104 | ||
8105 | valid_cksum = arc_cksum_is_equal(hdr, zio); | |
b5256303 TC |
8106 | using_rdata = (HDR_HAS_RABD(hdr) && |
8107 | zio->io_abd == hdr->b_crypt_hdr.b_rabd); | |
8108 | ||
8109 | /* | |
8110 | * b_rabd will always match the data as it exists on disk if it is | |
8111 | * being used. Therefore if we are reading into b_rabd we do not | |
8112 | * attempt to untransform the data. | |
8113 | */ | |
8114 | if (valid_cksum && !using_rdata) | |
8115 | tfm_error = l2arc_untransform(zio, cb); | |
8116 | ||
8117 | if (valid_cksum && tfm_error == 0 && zio->io_error == 0 && | |
8118 | !HDR_L2_EVICTED(hdr)) { | |
34dc7c2f | 8119 | mutex_exit(hash_lock); |
d3c2ae1c | 8120 | zio->io_private = hdr; |
34dc7c2f BB |
8121 | arc_read_done(zio); |
8122 | } else { | |
8123 | mutex_exit(hash_lock); | |
8124 | /* | |
8125 | * Buffer didn't survive caching. Increment stats and | |
8126 | * reissue to the original storage device. | |
8127 | */ | |
b128c09f | 8128 | if (zio->io_error != 0) { |
34dc7c2f | 8129 | ARCSTAT_BUMP(arcstat_l2_io_error); |
b128c09f | 8130 | } else { |
2e528b49 | 8131 | zio->io_error = SET_ERROR(EIO); |
b128c09f | 8132 | } |
b5256303 | 8133 | if (!valid_cksum || tfm_error != 0) |
34dc7c2f BB |
8134 | ARCSTAT_BUMP(arcstat_l2_cksum_bad); |
8135 | ||
34dc7c2f | 8136 | /* |
b128c09f BB |
8137 | * If there's no waiter, issue an async i/o to the primary |
8138 | * storage now. If there *is* a waiter, the caller must | |
8139 | * issue the i/o in a context where it's OK to block. | |
34dc7c2f | 8140 | */ |
d164b209 BB |
8141 | if (zio->io_waiter == NULL) { |
8142 | zio_t *pio = zio_unique_parent(zio); | |
b5256303 TC |
8143 | void *abd = (using_rdata) ? |
8144 | hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd; | |
d164b209 BB |
8145 | |
8146 | ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); | |
8147 | ||
d3c2ae1c | 8148 | zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp, |
b5256303 | 8149 | abd, zio->io_size, arc_read_done, |
d3c2ae1c GW |
8150 | hdr, zio->io_priority, cb->l2rcb_flags, |
8151 | &cb->l2rcb_zb)); | |
d164b209 | 8152 | } |
34dc7c2f BB |
8153 | } |
8154 | ||
8155 | kmem_free(cb, sizeof (l2arc_read_callback_t)); | |
8156 | } | |
8157 | ||
8158 | /* | |
8159 | * This is the list priority from which the L2ARC will search for pages to | |
8160 | * cache. This is used within loops (0..3) to cycle through lists in the | |
8161 | * desired order. This order can have a significant effect on cache | |
8162 | * performance. | |
8163 | * | |
8164 | * Currently the metadata lists are hit first, MFU then MRU, followed by | |
8165 | * the data lists. This function returns a locked list, and also returns | |
8166 | * the lock pointer. | |
8167 | */ | |
ca0bf58d PS |
8168 | static multilist_sublist_t * |
8169 | l2arc_sublist_lock(int list_num) | |
34dc7c2f | 8170 | { |
ca0bf58d PS |
8171 | multilist_t *ml = NULL; |
8172 | unsigned int idx; | |
34dc7c2f | 8173 | |
4aafab91 | 8174 | ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES); |
34dc7c2f BB |
8175 | |
8176 | switch (list_num) { | |
8177 | case 0: | |
64fc7762 | 8178 | ml = arc_mfu->arcs_list[ARC_BUFC_METADATA]; |
34dc7c2f BB |
8179 | break; |
8180 | case 1: | |
64fc7762 | 8181 | ml = arc_mru->arcs_list[ARC_BUFC_METADATA]; |
34dc7c2f BB |
8182 | break; |
8183 | case 2: | |
64fc7762 | 8184 | ml = arc_mfu->arcs_list[ARC_BUFC_DATA]; |
34dc7c2f BB |
8185 | break; |
8186 | case 3: | |
64fc7762 | 8187 | ml = arc_mru->arcs_list[ARC_BUFC_DATA]; |
34dc7c2f | 8188 | break; |
4aafab91 G |
8189 | default: |
8190 | return (NULL); | |
34dc7c2f BB |
8191 | } |
8192 | ||
ca0bf58d PS |
8193 | /* |
8194 | * Return a randomly-selected sublist. This is acceptable | |
8195 | * because the caller feeds only a little bit of data for each | |
8196 | * call (8MB). Subsequent calls will result in different | |
8197 | * sublists being selected. | |
8198 | */ | |
8199 | idx = multilist_get_random_index(ml); | |
8200 | return (multilist_sublist_lock(ml, idx)); | |
34dc7c2f BB |
8201 | } |
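
/*
 * For reference, the feed order that results from the switch above, as
 * driven by the try loop (0..3) in l2arc_write_buffers():
 *
 *	try 0: MFU metadata	try 2: MFU data
 *	try 1: MRU metadata	try 3: MRU data
 */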
8202 | ||
8203 | /* | |
8204 | * Evict buffers from the device write hand to the distance specified in | |
 * bytes. This distance may span populated buffers, or it may span nothing.
 * Either way, it clears a region of the L2ARC device, making it ready for
 * writing.
8207 | * If the 'all' boolean is set, every buffer is evicted. | |
8208 | */ | |
8209 | static void | |
8210 | l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) | |
8211 | { | |
8212 | list_t *buflist; | |
2a432414 | 8213 | arc_buf_hdr_t *hdr, *hdr_prev; |
34dc7c2f BB |
8214 | kmutex_t *hash_lock; |
8215 | uint64_t taddr; | |
8216 | ||
b9541d6b | 8217 | buflist = &dev->l2ad_buflist; |
34dc7c2f BB |
8218 | |
8219 | if (!all && dev->l2ad_first) { | |
8220 | /* | |
8221 | * This is the first sweep through the device. There is | |
8222 | * nothing to evict. | |
8223 | */ | |
8224 | return; | |
8225 | } | |
8226 | ||
b128c09f | 8227 | if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { |
34dc7c2f BB |
8228 | /* |
8229 | * When nearing the end of the device, evict to the end | |
8230 | * before the device write hand jumps to the start. | |
8231 | */ | |
8232 | taddr = dev->l2ad_end; | |
8233 | } else { | |
8234 | taddr = dev->l2ad_hand + distance; | |
8235 | } | |
8236 | DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, | |
8237 | uint64_t, taddr, boolean_t, all); | |
8238 | ||
8239 | top: | |
b9541d6b | 8240 | mutex_enter(&dev->l2ad_mtx); |
2a432414 GW |
8241 | for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) { |
8242 | hdr_prev = list_prev(buflist, hdr); | |
34dc7c2f | 8243 | |
2a432414 | 8244 | hash_lock = HDR_LOCK(hdr); |
ca0bf58d PS |
8245 | |
8246 | /* | |
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * in which the hash lock and l2ad_mtx are taken).
8250 | */ | |
34dc7c2f BB |
8251 | if (!mutex_tryenter(hash_lock)) { |
8252 | /* | |
8253 | * Missed the hash lock. Retry. | |
8254 | */ | |
8255 | ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); | |
b9541d6b | 8256 | mutex_exit(&dev->l2ad_mtx); |
34dc7c2f BB |
8257 | mutex_enter(hash_lock); |
8258 | mutex_exit(hash_lock); | |
8259 | goto top; | |
8260 | } | |
8261 | ||
f06f53fa AG |
8262 | /* |
		 * A header can't be on this list if it doesn't have an
		 * L2 header.
8264 | */ | |
8265 | ASSERT(HDR_HAS_L2HDR(hdr)); | |
34dc7c2f | 8266 | |
f06f53fa AG |
8267 | /* Ensure this header has finished being written. */ |
8268 | ASSERT(!HDR_L2_WRITING(hdr)); | |
8269 | ASSERT(!HDR_L2_WRITE_HEAD(hdr)); | |
8270 | ||
8271 | if (!all && (hdr->b_l2hdr.b_daddr >= taddr || | |
b9541d6b | 8272 | hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) { |
34dc7c2f BB |
8273 | /* |
8274 | * We've evicted to the target address, | |
8275 | * or the end of the device. | |
8276 | */ | |
8277 | mutex_exit(hash_lock); | |
8278 | break; | |
8279 | } | |
8280 | ||
b9541d6b | 8281 | if (!HDR_HAS_L1HDR(hdr)) { |
2a432414 | 8282 | ASSERT(!HDR_L2_READING(hdr)); |
34dc7c2f BB |
8283 | /* |
8284 | * This doesn't exist in the ARC. Destroy. | |
8285 | * arc_hdr_destroy() will call list_remove() | |
01850391 | 8286 | * and decrement arcstat_l2_lsize. |
34dc7c2f | 8287 | */ |
2a432414 GW |
8288 | arc_change_state(arc_anon, hdr, hash_lock); |
8289 | arc_hdr_destroy(hdr); | |
34dc7c2f | 8290 | } else { |
b9541d6b CW |
8291 | ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only); |
8292 | ARCSTAT_BUMP(arcstat_l2_evict_l1cached); | |
b128c09f BB |
8293 | /* |
8294 | * Invalidate issued or about to be issued | |
8295 | * reads, since we may be about to write | |
8296 | * over this location. | |
8297 | */ | |
2a432414 | 8298 | if (HDR_L2_READING(hdr)) { |
b128c09f | 8299 | ARCSTAT_BUMP(arcstat_l2_evict_reading); |
d3c2ae1c | 8300 | arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED); |
b128c09f BB |
8301 | } |
8302 | ||
d962d5da | 8303 | arc_hdr_l2hdr_destroy(hdr); |
34dc7c2f BB |
8304 | } |
8305 | mutex_exit(hash_lock); | |
8306 | } | |
b9541d6b | 8307 | mutex_exit(&dev->l2ad_mtx); |
34dc7c2f BB |
8308 | } |
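
/*
 * Worked example with illustrative numbers: if l2ad_end = 1024M,
 * l2ad_hand = 1010M and distance = 8M, the hand is within 2 * distance
 * of the end, so we evict through taddr = l2ad_end; the writer then
 * wraps the hand back to l2ad_start for the next pass.
 */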
8309 | ||
b5256303 TC |
8310 | /* |
8311 | * Handle any abd transforms that might be required for writing to the L2ARC. | |
 * If successful, this function always returns, via abd_out, a new abd of
 * asize bytes that holds the data transformed as it is on disk.
8314 | */ | |
8315 | static int | |
8316 | l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize, | |
8317 | abd_t **abd_out) | |
8318 | { | |
8319 | int ret; | |
8320 | void *tmp = NULL; | |
8321 | abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd; | |
8322 | enum zio_compress compress = HDR_GET_COMPRESS(hdr); | |
8323 | uint64_t psize = HDR_GET_PSIZE(hdr); | |
8324 | uint64_t size = arc_hdr_size(hdr); | |
8325 | boolean_t ismd = HDR_ISTYPE_METADATA(hdr); | |
8326 | boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS); | |
8327 | dsl_crypto_key_t *dck = NULL; | |
8328 | uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 }; | |
4807c0ba | 8329 | boolean_t no_crypt = B_FALSE; |
b5256303 TC |
8330 | |
8331 | ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && | |
8332 | !HDR_COMPRESSION_ENABLED(hdr)) || | |
8333 | HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize); | |
8334 | ASSERT3U(psize, <=, asize); | |
8335 | ||
8336 | /* | |
	 * If this data simply needs its own buffer, we allocate one and
	 * copy the data. This may be done to eliminate a dependency on a
	 * shared buffer or to reallocate the buffer to match asize.
8340 | */ | |
4807c0ba TC |
8341 | if (HDR_HAS_RABD(hdr) && asize != psize) { |
8342 | ASSERT3U(size, ==, psize); | |
8343 | to_write = abd_alloc_for_io(asize, ismd); | |
8344 | abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, size); | |
8345 | if (size != asize) | |
8346 | abd_zero_off(to_write, size, asize - size); | |
8347 | goto out; | |
8348 | } | |
8349 | ||
b5256303 TC |
8350 | if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) && |
8351 | !HDR_ENCRYPTED(hdr)) { | |
8352 | ASSERT3U(size, ==, psize); | |
8353 | to_write = abd_alloc_for_io(asize, ismd); | |
8354 | abd_copy(to_write, hdr->b_l1hdr.b_pabd, size); | |
8355 | if (size != asize) | |
8356 | abd_zero_off(to_write, size, asize - size); | |
8357 | goto out; | |
8358 | } | |
8359 | ||
8360 | if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) { | |
8361 | cabd = abd_alloc_for_io(asize, ismd); | |
8362 | tmp = abd_borrow_buf(cabd, asize); | |
8363 | ||
8364 | psize = zio_compress_data(compress, to_write, tmp, size); | |
8365 | ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr)); | |
8366 | if (psize < asize) | |
8367 | bzero((char *)tmp + psize, asize - psize); | |
8368 | psize = HDR_GET_PSIZE(hdr); | |
8369 | abd_return_buf_copy(cabd, tmp, asize); | |
8370 | to_write = cabd; | |
8371 | } | |
8372 | ||
8373 | if (HDR_ENCRYPTED(hdr)) { | |
8374 | eabd = abd_alloc_for_io(asize, ismd); | |
8375 | ||
8376 | /* | |
8377 | * If the dataset was disowned before the buffer | |
8378 | * made it to this point, the key to re-encrypt | |
8379 | * it won't be available. In this case we simply | |
8380 | * won't write the buffer to the L2ARC. | |
8381 | */ | |
8382 | ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj, | |
8383 | FTAG, &dck); | |
8384 | if (ret != 0) | |
8385 | goto error; | |
8386 | ||
8387 | ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key, | |
8388 | hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot, | |
8389 | hdr->b_crypt_hdr.b_iv, mac, psize, bswap, to_write, | |
8390 | eabd, &no_crypt); | |
8391 | if (ret != 0) | |
8392 | goto error; | |
8393 | ||
4807c0ba TC |
8394 | if (no_crypt) |
8395 | abd_copy(eabd, to_write, psize); | |
b5256303 TC |
8396 | |
8397 | if (psize != asize) | |
8398 | abd_zero_off(eabd, psize, asize - psize); | |
8399 | ||
8400 | /* assert that the MAC we got here matches the one we saved */ | |
8401 | ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN)); | |
8402 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
8403 | ||
8404 | if (to_write == cabd) | |
8405 | abd_free(cabd); | |
8406 | ||
8407 | to_write = eabd; | |
8408 | } | |
8409 | ||
8410 | out: | |
8411 | ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd); | |
8412 | *abd_out = to_write; | |
8413 | return (0); | |
8414 | ||
8415 | error: | |
8416 | if (dck != NULL) | |
8417 | spa_keystore_dsl_key_rele(spa, dck, FTAG); | |
8418 | if (cabd != NULL) | |
8419 | abd_free(cabd); | |
8420 | if (eabd != NULL) | |
8421 | abd_free(eabd); | |
8422 | ||
8423 | *abd_out = NULL; | |
8424 | return (ret); | |
8425 | } | |
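
/*
 * Note: every successful path above zero-pads the output abd out to
 * asize, so the caller can issue a single zio_write_phys() of exactly
 * asize bytes with no partially-filled tail sector on the cache device.
 */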
8426 | ||
34dc7c2f BB |
8427 | /* |
8428 | * Find and write ARC buffers to the L2ARC device. | |
8429 | * | |
2a432414 | 8430 | * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid |
34dc7c2f | 8431 | * for reading until they have completed writing. |
3a17a7a9 SK |
 * When compressed ARC is enabled, the headroom scanned for eligible
 * buffers is scaled up by l2arc_headroom_boost (see the feed loop below).
8434 | * | |
8435 | * Returns the number of bytes actually written (which may be smaller than | |
8436 | * the delta by which the device hand has changed due to alignment). | |
34dc7c2f | 8437 | */ |
d164b209 | 8438 | static uint64_t |
d3c2ae1c | 8439 | l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) |
34dc7c2f | 8440 | { |
2a432414 | 8441 | arc_buf_hdr_t *hdr, *hdr_prev, *head; |
01850391 | 8442 | uint64_t write_asize, write_psize, write_lsize, headroom; |
3a17a7a9 | 8443 | boolean_t full; |
34dc7c2f BB |
8444 | l2arc_write_callback_t *cb; |
8445 | zio_t *pio, *wzio; | |
3541dc6d | 8446 | uint64_t guid = spa_load_guid(spa); |
d6320ddb | 8447 | int try; |
34dc7c2f | 8448 | |
d3c2ae1c | 8449 | ASSERT3P(dev->l2ad_vdev, !=, NULL); |
3a17a7a9 | 8450 | |
34dc7c2f | 8451 | pio = NULL; |
01850391 | 8452 | write_lsize = write_asize = write_psize = 0; |
34dc7c2f | 8453 | full = B_FALSE; |
b9541d6b | 8454 | head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); |
d3c2ae1c | 8455 | arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); |
3a17a7a9 | 8456 | |
34dc7c2f BB |
8457 | /* |
8458 | * Copy buffers for L2ARC writing. | |
8459 | */ | |
4aafab91 | 8460 | for (try = 0; try < L2ARC_FEED_TYPES; try++) { |
ca0bf58d | 8461 | multilist_sublist_t *mls = l2arc_sublist_lock(try); |
3a17a7a9 SK |
8462 | uint64_t passed_sz = 0; |
8463 | ||
4aafab91 G |
8464 | VERIFY3P(mls, !=, NULL); |
8465 | ||
b128c09f BB |
8466 | /* |
8467 | * L2ARC fast warmup. | |
8468 | * | |
8469 | * Until the ARC is warm and starts to evict, read from the | |
8470 | * head of the ARC lists rather than the tail. | |
8471 | */ | |
b128c09f | 8472 | if (arc_warm == B_FALSE) |
ca0bf58d | 8473 | hdr = multilist_sublist_head(mls); |
b128c09f | 8474 | else |
ca0bf58d | 8475 | hdr = multilist_sublist_tail(mls); |
b128c09f | 8476 | |
3a17a7a9 | 8477 | headroom = target_sz * l2arc_headroom; |
d3c2ae1c | 8478 | if (zfs_compressed_arc_enabled) |
3a17a7a9 SK |
8479 | headroom = (headroom * l2arc_headroom_boost) / 100; |
8480 | ||
2a432414 | 8481 | for (; hdr; hdr = hdr_prev) { |
3a17a7a9 | 8482 | kmutex_t *hash_lock; |
b5256303 | 8483 | abd_t *to_write = NULL; |
3a17a7a9 | 8484 | |
b128c09f | 8485 | if (arc_warm == B_FALSE) |
ca0bf58d | 8486 | hdr_prev = multilist_sublist_next(mls, hdr); |
b128c09f | 8487 | else |
ca0bf58d | 8488 | hdr_prev = multilist_sublist_prev(mls, hdr); |
34dc7c2f | 8489 | |
2a432414 | 8490 | hash_lock = HDR_LOCK(hdr); |
3a17a7a9 | 8491 | if (!mutex_tryenter(hash_lock)) { |
34dc7c2f BB |
8492 | /* |
8493 | * Skip this buffer rather than waiting. | |
8494 | */ | |
8495 | continue; | |
8496 | } | |
8497 | ||
d3c2ae1c | 8498 | passed_sz += HDR_GET_LSIZE(hdr); |
34dc7c2f BB |
8499 | if (passed_sz > headroom) { |
8500 | /* | |
8501 | * Searched too far. | |
8502 | */ | |
8503 | mutex_exit(hash_lock); | |
8504 | break; | |
8505 | } | |
8506 | ||
2a432414 | 8507 | if (!l2arc_write_eligible(guid, hdr)) { |
34dc7c2f BB |
8508 | mutex_exit(hash_lock); |
8509 | continue; | |
8510 | } | |
8511 | ||
01850391 AG |
8512 | /* |
8513 | * We rely on the L1 portion of the header below, so | |
8514 | * it's invalid for this header to have been evicted out | |
8515 | * of the ghost cache, prior to being written out. The | |
8516 | * ARC_FLAG_L2_WRITING bit ensures this won't happen. | |
8517 | */ | |
8518 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
8519 | ||
8520 | ASSERT3U(HDR_GET_PSIZE(hdr), >, 0); | |
01850391 | 8521 | ASSERT3U(arc_hdr_size(hdr), >, 0); |
b5256303 TC |
8522 | ASSERT(hdr->b_l1hdr.b_pabd != NULL || |
8523 | HDR_HAS_RABD(hdr)); | |
8524 | uint64_t psize = HDR_GET_PSIZE(hdr); | |
01850391 AG |
8525 | uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, |
8526 | psize); | |
8527 | ||
8528 | if ((write_asize + asize) > target_sz) { | |
34dc7c2f BB |
8529 | full = B_TRUE; |
8530 | mutex_exit(hash_lock); | |
8531 | break; | |
8532 | } | |
8533 | ||
b5256303 TC |
			/*
			 * The header was validated above; now flag it as
			 * being written so it can't be evicted out from
			 * under the in-flight I/O (this also enforces the
			 * L1 reliance noted earlier).
			 */
			arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
8547 | ||
8548 | /* | |
8549 | * If this header has b_rabd, we can use this since it | |
8550 | * must always match the data exactly as it exists on | |
8551 | * disk. Otherwise, the L2ARC can normally use the | |
8552 | * hdr's data, but if we're sharing data between the | |
8553 | * hdr and one of its bufs, L2ARC needs its own copy of | |
8554 | * the data so that the ZIO below can't race with the | |
8555 | * buf consumer. To ensure that this copy will be | |
8556 | * available for the lifetime of the ZIO and be cleaned | |
8557 | * up afterwards, we add it to the l2arc_free_on_write | |
8558 | * queue. If we need to apply any transforms to the | |
8559 | * data (compression, encryption) we will also need the | |
8560 | * extra buffer. | |
8561 | */ | |
8562 | if (HDR_HAS_RABD(hdr) && psize == asize) { | |
8563 | to_write = hdr->b_crypt_hdr.b_rabd; | |
8564 | } else if ((HDR_COMPRESSION_ENABLED(hdr) || | |
8565 | HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) && | |
8566 | !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) && | |
8567 | psize == asize) { | |
8568 | to_write = hdr->b_l1hdr.b_pabd; | |
8569 | } else { | |
8570 | int ret; | |
8571 | arc_buf_contents_t type = arc_buf_type(hdr); | |
8572 | ||
8573 | ret = l2arc_apply_transforms(spa, hdr, asize, | |
8574 | &to_write); | |
8575 | if (ret != 0) { | |
8576 | arc_hdr_clear_flags(hdr, | |
8577 | ARC_FLAG_L2_WRITING); | |
8578 | mutex_exit(hash_lock); | |
8579 | continue; | |
8580 | } | |
8581 | ||
8582 | l2arc_free_abd_on_write(to_write, asize, type); | |
8583 | } | |
8584 | ||
34dc7c2f BB |
8585 | if (pio == NULL) { |
8586 | /* | |
8587 | * Insert a dummy header on the buflist so | |
8588 | * l2arc_write_done() can find where the | |
8589 | * write buffers begin without searching. | |
8590 | */ | |
ca0bf58d | 8591 | mutex_enter(&dev->l2ad_mtx); |
b9541d6b | 8592 | list_insert_head(&dev->l2ad_buflist, head); |
ca0bf58d | 8593 | mutex_exit(&dev->l2ad_mtx); |
34dc7c2f | 8594 | |
96c080cb BB |
8595 | cb = kmem_alloc( |
8596 | sizeof (l2arc_write_callback_t), KM_SLEEP); | |
34dc7c2f BB |
8597 | cb->l2wcb_dev = dev; |
8598 | cb->l2wcb_head = head; | |
8599 | pio = zio_root(spa, l2arc_write_done, cb, | |
8600 | ZIO_FLAG_CANFAIL); | |
8601 | } | |
8602 | ||
b9541d6b | 8603 | hdr->b_l2hdr.b_dev = dev; |
b9541d6b | 8604 | hdr->b_l2hdr.b_hits = 0; |
3a17a7a9 | 8605 | |
d3c2ae1c | 8606 | hdr->b_l2hdr.b_daddr = dev->l2ad_hand; |
b5256303 | 8607 | arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR); |
3a17a7a9 | 8608 | |
ca0bf58d | 8609 | mutex_enter(&dev->l2ad_mtx); |
b9541d6b | 8610 | list_insert_head(&dev->l2ad_buflist, hdr); |
ca0bf58d | 8611 | mutex_exit(&dev->l2ad_mtx); |
34dc7c2f | 8612 | |
b5256303 TC |
8613 | (void) refcount_add_many(&dev->l2ad_alloc, |
8614 | arc_hdr_size(hdr), hdr); | |
3a17a7a9 | 8615 | |
34dc7c2f | 8616 | wzio = zio_write_phys(pio, dev->l2ad_vdev, |
82710e99 | 8617 | hdr->b_l2hdr.b_daddr, asize, to_write, |
d3c2ae1c GW |
8618 | ZIO_CHECKSUM_OFF, NULL, hdr, |
8619 | ZIO_PRIORITY_ASYNC_WRITE, | |
34dc7c2f BB |
8620 | ZIO_FLAG_CANFAIL, B_FALSE); |
8621 | ||
01850391 | 8622 | write_lsize += HDR_GET_LSIZE(hdr); |
34dc7c2f BB |
8623 | DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, |
8624 | zio_t *, wzio); | |
d962d5da | 8625 | |
01850391 AG |
8626 | write_psize += psize; |
8627 | write_asize += asize; | |
d3c2ae1c GW |
8628 | dev->l2ad_hand += asize; |
8629 | ||
8630 | mutex_exit(hash_lock); | |
8631 | ||
8632 | (void) zio_nowait(wzio); | |
34dc7c2f | 8633 | } |
d3c2ae1c GW |
8634 | |
8635 | multilist_sublist_unlock(mls); | |
8636 | ||
8637 | if (full == B_TRUE) | |
8638 | break; | |
34dc7c2f | 8639 | } |
34dc7c2f | 8640 | |
d3c2ae1c GW |
8641 | /* No buffers selected for writing? */ |
8642 | if (pio == NULL) { | |
01850391 | 8643 | ASSERT0(write_lsize); |
d3c2ae1c GW |
8644 | ASSERT(!HDR_HAS_L1HDR(head)); |
8645 | kmem_cache_free(hdr_l2only_cache, head); | |
8646 | return (0); | |
8647 | } | |
34dc7c2f | 8648 | |
3a17a7a9 | 8649 | ASSERT3U(write_asize, <=, target_sz); |
34dc7c2f | 8650 | ARCSTAT_BUMP(arcstat_l2_writes_sent); |
01850391 AG |
8651 | ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize); |
8652 | ARCSTAT_INCR(arcstat_l2_lsize, write_lsize); | |
8653 | ARCSTAT_INCR(arcstat_l2_psize, write_psize); | |
8654 | vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0); | |
34dc7c2f BB |
8655 | |
8656 | /* | |
8657 | * Bump device hand to the device start if it is approaching the end. | |
8658 | * l2arc_evict() will already have evicted ahead for this case. | |
8659 | */ | |
b128c09f | 8660 | if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { |
34dc7c2f | 8661 | dev->l2ad_hand = dev->l2ad_start; |
34dc7c2f BB |
8662 | dev->l2ad_first = B_FALSE; |
8663 | } | |
8664 | ||
d164b209 | 8665 | dev->l2ad_writing = B_TRUE; |
34dc7c2f | 8666 | (void) zio_wait(pio); |
d164b209 BB |
8667 | dev->l2ad_writing = B_FALSE; |
8668 | ||
3a17a7a9 SK |
8669 | return (write_asize); |
8670 | } | |
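
/*
 * Accounting summary for the counters above: write_lsize is the logical
 * size of what was cached, write_psize the physical (possibly compressed)
 * size, and write_asize the space actually allocated on the cache device
 * after vdev alignment; asize is what advances the device hand.
 */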
8671 | ||
34dc7c2f BB |
8672 | /* |
8673 | * This thread feeds the L2ARC at regular intervals. This is the beating | |
8674 | * heart of the L2ARC. | |
8675 | */ | |
8676 | static void | |
c25b8f99 | 8677 | l2arc_feed_thread(void *unused) |
34dc7c2f BB |
8678 | { |
8679 | callb_cpr_t cpr; | |
8680 | l2arc_dev_t *dev; | |
8681 | spa_t *spa; | |
d164b209 | 8682 | uint64_t size, wrote; |
428870ff | 8683 | clock_t begin, next = ddi_get_lbolt(); |
40d06e3c | 8684 | fstrans_cookie_t cookie; |
34dc7c2f BB |
8685 | |
8686 | CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); | |
8687 | ||
8688 | mutex_enter(&l2arc_feed_thr_lock); | |
8689 | ||
40d06e3c | 8690 | cookie = spl_fstrans_mark(); |
34dc7c2f | 8691 | while (l2arc_thread_exit == 0) { |
34dc7c2f | 8692 | CALLB_CPR_SAFE_BEGIN(&cpr); |
b64ccd6c | 8693 | (void) cv_timedwait_sig(&l2arc_feed_thr_cv, |
5b63b3eb | 8694 | &l2arc_feed_thr_lock, next); |
34dc7c2f | 8695 | CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); |
428870ff | 8696 | next = ddi_get_lbolt() + hz; |
34dc7c2f BB |
8697 | |
8698 | /* | |
b128c09f | 8699 | * Quick check for L2ARC devices. |
34dc7c2f BB |
8700 | */ |
8701 | mutex_enter(&l2arc_dev_mtx); | |
8702 | if (l2arc_ndev == 0) { | |
8703 | mutex_exit(&l2arc_dev_mtx); | |
8704 | continue; | |
8705 | } | |
b128c09f | 8706 | mutex_exit(&l2arc_dev_mtx); |
428870ff | 8707 | begin = ddi_get_lbolt(); |
34dc7c2f BB |
8708 | |
8709 | /* | |
b128c09f BB |
8710 | * This selects the next l2arc device to write to, and in |
8711 | * doing so the next spa to feed from: dev->l2ad_spa. This | |
8712 | * will return NULL if there are now no l2arc devices or if | |
8713 | * they are all faulted. | |
8714 | * | |
8715 | * If a device is returned, its spa's config lock is also | |
8716 | * held to prevent device removal. l2arc_dev_get_next() | |
8717 | * will grab and release l2arc_dev_mtx. | |
34dc7c2f | 8718 | */ |
b128c09f | 8719 | if ((dev = l2arc_dev_get_next()) == NULL) |
34dc7c2f | 8720 | continue; |
b128c09f BB |
8721 | |
8722 | spa = dev->l2ad_spa; | |
d3c2ae1c | 8723 | ASSERT3P(spa, !=, NULL); |
34dc7c2f | 8724 | |
572e2857 BB |
8725 | /* |
8726 | * If the pool is read-only then force the feed thread to | |
8727 | * sleep a little longer. | |
8728 | */ | |
8729 | if (!spa_writeable(spa)) { | |
8730 | next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; | |
8731 | spa_config_exit(spa, SCL_L2ARC, dev); | |
8732 | continue; | |
8733 | } | |
8734 | ||
34dc7c2f | 8735 | /* |
b128c09f | 8736 | * Avoid contributing to memory pressure. |
34dc7c2f | 8737 | */ |
ca67b33a | 8738 | if (arc_reclaim_needed()) { |
b128c09f BB |
8739 | ARCSTAT_BUMP(arcstat_l2_abort_lowmem); |
8740 | spa_config_exit(spa, SCL_L2ARC, dev); | |
34dc7c2f BB |
8741 | continue; |
8742 | } | |
b128c09f | 8743 | |
34dc7c2f BB |
8744 | ARCSTAT_BUMP(arcstat_l2_feeds); |
8745 | ||
3a17a7a9 | 8746 | size = l2arc_write_size(); |
b128c09f | 8747 | |
34dc7c2f BB |
8748 | /* |
8749 | * Evict L2ARC buffers that will be overwritten. | |
8750 | */ | |
b128c09f | 8751 | l2arc_evict(dev, size, B_FALSE); |
34dc7c2f BB |
8752 | |
8753 | /* | |
8754 | * Write ARC buffers. | |
8755 | */ | |
d3c2ae1c | 8756 | wrote = l2arc_write_buffers(spa, dev, size); |
d164b209 BB |
8757 | |
8758 | /* | |
8759 | * Calculate interval between writes. | |
8760 | */ | |
8761 | next = l2arc_write_interval(begin, size, wrote); | |
b128c09f | 8762 | spa_config_exit(spa, SCL_L2ARC, dev); |
34dc7c2f | 8763 | } |
40d06e3c | 8764 | spl_fstrans_unmark(cookie); |
34dc7c2f BB |
8765 | |
8766 | l2arc_thread_exit = 0; | |
8767 | cv_broadcast(&l2arc_feed_thr_cv); | |
8768 | CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ | |
8769 | thread_exit(); | |
8770 | } | |
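
/*
 * For reference, the helpers used above (defined earlier in this file)
 * implement roughly the following policy: l2arc_write_size() starts from
 * l2arc_write_max and adds l2arc_write_boost while the ARC is still
 * warming up (arc_warm == B_FALSE); l2arc_write_interval() schedules the
 * next wakeup after only l2arc_feed_min_ms when more than half of the
 * wanted bytes were written and l2arc_feed_again is set, and after
 * l2arc_feed_secs otherwise.
 */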
8771 | ||
b128c09f BB |
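/*
 * Determine whether vdev 'vd' is currently registered on l2arc_dev_list,
 * i.e. whether it is being managed as an L2ARC cache device.
 */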
8772 | boolean_t |
8773 | l2arc_vdev_present(vdev_t *vd) | |
8774 | { | |
8775 | l2arc_dev_t *dev; | |
8776 | ||
8777 | mutex_enter(&l2arc_dev_mtx); | |
8778 | for (dev = list_head(l2arc_dev_list); dev != NULL; | |
8779 | dev = list_next(l2arc_dev_list, dev)) { | |
8780 | if (dev->l2ad_vdev == vd) | |
8781 | break; | |
8782 | } | |
8783 | mutex_exit(&l2arc_dev_mtx); | |
8784 | ||
8785 | return (dev != NULL); | |
8786 | } | |
8787 | ||
34dc7c2f BB |
8788 | /* |
8789 | * Add a vdev for use by the L2ARC. By this point the spa has already | |
8790 | * validated the vdev and opened it. | |
8791 | */ | |
8792 | void | |
9babb374 | 8793 | l2arc_add_vdev(spa_t *spa, vdev_t *vd) |
34dc7c2f BB |
8794 | { |
8795 | l2arc_dev_t *adddev; | |
8796 | ||
b128c09f BB |
8797 | ASSERT(!l2arc_vdev_present(vd)); |
8798 | ||
34dc7c2f BB |
8799 | /* |
8800 | * Create a new l2arc device entry. | |
8801 | */ | |
8802 | adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); | |
8803 | adddev->l2ad_spa = spa; | |
8804 | adddev->l2ad_vdev = vd; | |
9babb374 BB |
8805 | adddev->l2ad_start = VDEV_LABEL_START_SIZE; |
8806 | adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); | |
34dc7c2f | 8807 | adddev->l2ad_hand = adddev->l2ad_start; |
34dc7c2f | 8808 | adddev->l2ad_first = B_TRUE; |
d164b209 | 8809 | adddev->l2ad_writing = B_FALSE; |
98f72a53 | 8810 | list_link_init(&adddev->l2ad_node); |
34dc7c2f | 8811 | |
b9541d6b | 8812 | mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
8813 | /* |
8814 | * This is a list of all ARC buffers that are still valid on the | |
8815 | * device. | |
8816 | */ | |
b9541d6b CW |
8817 | list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), |
8818 | offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node)); | |
34dc7c2f | 8819 | |
428870ff | 8820 | vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); |
d962d5da | 8821 | refcount_create(&adddev->l2ad_alloc); |
34dc7c2f BB |
8822 | |
8823 | /* | |
8824 | * Add device to global list | |
8825 | */ | |
8826 | mutex_enter(&l2arc_dev_mtx); | |
8827 | list_insert_head(l2arc_dev_list, adddev); | |
8828 | atomic_inc_64(&l2arc_ndev); | |
8829 | mutex_exit(&l2arc_dev_mtx); | |
8830 | } | |
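
/*
 * Layout example (illustrative): the usable region of a cache device is
 * [l2ad_start, l2ad_end) = [VDEV_LABEL_START_SIZE, VDEV_LABEL_START_SIZE +
 * vdev_get_min_asize(vd)), and the write hand sweeps linearly from
 * l2ad_start toward l2ad_end before wrapping (see l2arc_write_buffers()).
 */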
8831 | ||
8832 | /* | |
8833 | * Remove a vdev from the L2ARC. | |
8834 | */ | |
8835 | void | |
8836 | l2arc_remove_vdev(vdev_t *vd) | |
8837 | { | |
8838 | l2arc_dev_t *dev, *nextdev, *remdev = NULL; | |
8839 | ||
34dc7c2f BB |
8840 | /* |
8841 | * Find the device by vdev | |
8842 | */ | |
8843 | mutex_enter(&l2arc_dev_mtx); | |
8844 | for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { | |
8845 | nextdev = list_next(l2arc_dev_list, dev); | |
8846 | if (vd == dev->l2ad_vdev) { | |
8847 | remdev = dev; | |
8848 | break; | |
8849 | } | |
8850 | } | |
d3c2ae1c | 8851 | ASSERT3P(remdev, !=, NULL); |
34dc7c2f BB |
8852 | |
8853 | /* | |
8854 | * Remove device from global list | |
8855 | */ | |
8856 | list_remove(l2arc_dev_list, remdev); | |
8857 | l2arc_dev_last = NULL; /* may have been invalidated */ | |
b128c09f BB |
8858 | atomic_dec_64(&l2arc_ndev); |
8859 | mutex_exit(&l2arc_dev_mtx); | |
34dc7c2f BB |
8860 | |
8861 | /* | |
8862 | * Clear all buflists and ARC references. L2ARC device flush. | |
8863 | */ | |
8864 | l2arc_evict(remdev, 0, B_TRUE); | |
b9541d6b CW |
8865 | list_destroy(&remdev->l2ad_buflist); |
8866 | mutex_destroy(&remdev->l2ad_mtx); | |
d962d5da | 8867 | refcount_destroy(&remdev->l2ad_alloc); |
34dc7c2f | 8868 | kmem_free(remdev, sizeof (l2arc_dev_t)); |
34dc7c2f BB |
8869 | } |
8870 | ||
8871 | void | |
b128c09f | 8872 | l2arc_init(void) |
34dc7c2f BB |
8873 | { |
8874 | l2arc_thread_exit = 0; | |
8875 | l2arc_ndev = 0; | |
8876 | l2arc_writes_sent = 0; | |
8877 | l2arc_writes_done = 0; | |
8878 | ||
8879 | mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
8880 | cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); | |
8881 | mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); | |
34dc7c2f BB |
8882 | mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); |
8883 | ||
8884 | l2arc_dev_list = &L2ARC_dev_list; | |
8885 | l2arc_free_on_write = &L2ARC_free_on_write; | |
8886 | list_create(l2arc_dev_list, sizeof (l2arc_dev_t), | |
8887 | offsetof(l2arc_dev_t, l2ad_node)); | |
8888 | list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), | |
8889 | offsetof(l2arc_data_free_t, l2df_list_node)); | |
34dc7c2f BB |
8890 | } |
8891 | ||
8892 | void | |
b128c09f | 8893 | l2arc_fini(void) |
34dc7c2f | 8894 | { |
b128c09f BB |
8895 | /* |
	 * This is called from dmu_fini(), which is called from spa_fini().
	 * Because of this, we can assume that all l2arc devices have
8898 | * already been removed when the pools themselves were removed. | |
8899 | */ | |
8900 | ||
8901 | l2arc_do_free_on_write(); | |
34dc7c2f BB |
8902 | |
8903 | mutex_destroy(&l2arc_feed_thr_lock); | |
8904 | cv_destroy(&l2arc_feed_thr_cv); | |
8905 | mutex_destroy(&l2arc_dev_mtx); | |
34dc7c2f BB |
8906 | mutex_destroy(&l2arc_free_on_write_mtx); |
8907 | ||
8908 | list_destroy(l2arc_dev_list); | |
8909 | list_destroy(l2arc_free_on_write); | |
8910 | } | |
b128c09f BB |
8911 | |
8912 | void | |
8913 | l2arc_start(void) | |
8914 | { | |
fb5f0bc8 | 8915 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
8916 | return; |
8917 | ||
8918 | (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, | |
1229323d | 8919 | TS_RUN, defclsyspri); |
b128c09f BB |
8920 | } |
8921 | ||
8922 | void | |
8923 | l2arc_stop(void) | |
8924 | { | |
fb5f0bc8 | 8925 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
8926 | return; |
8927 | ||
8928 | mutex_enter(&l2arc_feed_thr_lock); | |
8929 | cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ | |
8930 | l2arc_thread_exit = 1; | |
8931 | while (l2arc_thread_exit != 0) | |
8932 | cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); | |
8933 | mutex_exit(&l2arc_feed_thr_lock); | |
8934 | } | |
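
/*
 * Note on the shutdown handshake above: l2arc_stop() sets
 * l2arc_thread_exit and waits on l2arc_feed_thr_cv; the feed thread
 * clears the flag and broadcasts on the same cv just before calling
 * thread_exit(), so l2arc_stop() does not return until the thread is
 * truly gone.
 */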
c28b2279 BB |
8935 | |
8936 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
0f699108 AZ |
8937 | EXPORT_SYMBOL(arc_buf_size); |
8938 | EXPORT_SYMBOL(arc_write); | |
c28b2279 | 8939 | EXPORT_SYMBOL(arc_read); |
e0b0ca98 | 8940 | EXPORT_SYMBOL(arc_buf_info); |
c28b2279 | 8941 | EXPORT_SYMBOL(arc_getbuf_func); |
ab26409d BB |
8942 | EXPORT_SYMBOL(arc_add_prune_callback); |
8943 | EXPORT_SYMBOL(arc_remove_prune_callback); | |
c28b2279 | 8944 | |
02730c33 | 8945 | /* BEGIN CSTYLED */ |
bce45ec9 | 8946 | module_param(zfs_arc_min, ulong, 0644); |
c409e464 | 8947 | MODULE_PARM_DESC(zfs_arc_min, "Min arc size"); |
c28b2279 | 8948 | |
bce45ec9 | 8949 | module_param(zfs_arc_max, ulong, 0644); |
c409e464 | 8950 | MODULE_PARM_DESC(zfs_arc_max, "Max arc size"); |
c28b2279 | 8951 | |
bce45ec9 | 8952 | module_param(zfs_arc_meta_limit, ulong, 0644); |
c28b2279 | 8953 | MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size"); |
6a8f9b6b | 8954 | |
9907cc1c G |
8955 | module_param(zfs_arc_meta_limit_percent, ulong, 0644); |
8956 | MODULE_PARM_DESC(zfs_arc_meta_limit_percent, | |
8957 | "Percent of arc size for arc meta limit"); | |
8958 | ||
ca0bf58d PS |
8959 | module_param(zfs_arc_meta_min, ulong, 0644); |
8960 | MODULE_PARM_DESC(zfs_arc_meta_min, "Min arc metadata"); | |
8961 | ||
bce45ec9 | 8962 | module_param(zfs_arc_meta_prune, int, 0644); |
2cbb06b5 | 8963 | MODULE_PARM_DESC(zfs_arc_meta_prune, "Meta objects to scan for prune"); |
c409e464 | 8964 | |
ca67b33a | 8965 | module_param(zfs_arc_meta_adjust_restarts, int, 0644); |
bc888666 BB |
8966 | MODULE_PARM_DESC(zfs_arc_meta_adjust_restarts, |
8967 | "Limit number of restarts in arc_adjust_meta"); | |
8968 | ||
f6046738 BB |
8969 | module_param(zfs_arc_meta_strategy, int, 0644); |
8970 | MODULE_PARM_DESC(zfs_arc_meta_strategy, "Meta reclaim strategy"); | |
8971 | ||
bce45ec9 | 8972 | module_param(zfs_arc_grow_retry, int, 0644); |
c409e464 BB |
8973 | MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size"); |
8974 | ||
89c8cac4 PS |
8975 | module_param(zfs_arc_p_aggressive_disable, int, 0644); |
8976 | MODULE_PARM_DESC(zfs_arc_p_aggressive_disable, "disable aggressive arc_p grow"); | |
8977 | ||
62422785 PS |
8978 | module_param(zfs_arc_p_dampener_disable, int, 0644); |
8979 | MODULE_PARM_DESC(zfs_arc_p_dampener_disable, "disable arc_p adapt dampener"); | |
8980 | ||
bce45ec9 | 8981 | module_param(zfs_arc_shrink_shift, int, 0644); |
c409e464 BB |
8982 | MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)"); |
8983 | ||
03b60eee DB |
8984 | module_param(zfs_arc_pc_percent, uint, 0644); |
8985 | MODULE_PARM_DESC(zfs_arc_pc_percent, | |
8986 | "Percent of pagecache to reclaim arc to"); | |
8987 | ||
728d6ae9 BB |
8988 | module_param(zfs_arc_p_min_shift, int, 0644); |
8989 | MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p"); | |
8990 | ||
49ddb315 MA |
8991 | module_param(zfs_arc_average_blocksize, int, 0444); |
8992 | MODULE_PARM_DESC(zfs_arc_average_blocksize, "Target average block size"); | |
8993 | ||
d3c2ae1c | 8994 | module_param(zfs_compressed_arc_enabled, int, 0644); |
MODULE_PARM_DESC(zfs_compressed_arc_enabled, "Enable compressed arc buffers");
d3c2ae1c | 8996 | |
bce45ec9 BB |
8997 | module_param(zfs_arc_min_prefetch_lifespan, int, 0644); |
8998 | MODULE_PARM_DESC(zfs_arc_min_prefetch_lifespan, "Min life of prefetch block"); | |
8999 | ||
9000 | module_param(l2arc_write_max, ulong, 0644); | |
abd8610c BB |
9001 | MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval"); |
9002 | ||
bce45ec9 | 9003 | module_param(l2arc_write_boost, ulong, 0644); |
abd8610c BB |
9004 | MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup"); |
9005 | ||
bce45ec9 | 9006 | module_param(l2arc_headroom, ulong, 0644); |
abd8610c BB |
9007 | MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes to precache"); |
9008 | ||
3a17a7a9 SK |
9009 | module_param(l2arc_headroom_boost, ulong, 0644); |
9010 | MODULE_PARM_DESC(l2arc_headroom_boost, "Compressed l2arc_headroom multiplier"); | |
9011 | ||
bce45ec9 | 9012 | module_param(l2arc_feed_secs, ulong, 0644); |
abd8610c BB |
9013 | MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing"); |
9014 | ||
bce45ec9 | 9015 | module_param(l2arc_feed_min_ms, ulong, 0644); |
abd8610c BB |
9016 | MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds"); |
9017 | ||
bce45ec9 | 9018 | module_param(l2arc_noprefetch, int, 0644); |
abd8610c BB |
9019 | MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers"); |
9020 | ||
bce45ec9 | 9021 | module_param(l2arc_feed_again, int, 0644); |
abd8610c BB |
9022 | MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup"); |
9023 | ||
bce45ec9 | 9024 | module_param(l2arc_norw, int, 0644); |
abd8610c BB |
9025 | MODULE_PARM_DESC(l2arc_norw, "No reads during writes"); |
9026 | ||
7e8bddd0 BB |
9027 | module_param(zfs_arc_lotsfree_percent, int, 0644); |
9028 | MODULE_PARM_DESC(zfs_arc_lotsfree_percent, | |
9029 | "System free memory I/O throttle in bytes"); | |
9030 | ||
11f552fa BB |
9031 | module_param(zfs_arc_sys_free, ulong, 0644); |
9032 | MODULE_PARM_DESC(zfs_arc_sys_free, "System free memory target size in bytes"); | |
9033 | ||
25458cbe TC |
9034 | module_param(zfs_arc_dnode_limit, ulong, 0644); |
MODULE_PARM_DESC(zfs_arc_dnode_limit, "Limit in bytes on dnodes held in arc");
9036 | ||
9907cc1c G |
9037 | module_param(zfs_arc_dnode_limit_percent, ulong, 0644); |
9038 | MODULE_PARM_DESC(zfs_arc_dnode_limit_percent, | |
9039 | "Percent of ARC meta buffers for dnodes"); | |
9040 | ||
25458cbe TC |
9041 | module_param(zfs_arc_dnode_reduce_percent, ulong, 0644); |
9042 | MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent, | |
9043 | "Percentage of excess dnodes to try to unpin"); | |
02730c33 | 9044 | /* END CSTYLED */ |
c28b2279 | 9045 | #endif |