/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_clear_callback()
 * and arc_do_user_evicts().
 *
 * It is also possible to register a callback which is run when the
 * arc_meta_limit is reached and no buffers can be safely evicted. In
 * this case the arc user should drop a reference on some arc buffers so
 * they can be reclaimed and the arc_meta_limit honored. For example,
 * when using the ZPL each dentry holds a reference on a znode. These
 * dentries must be pruned before the arc buffer holding the znode can
 * be safely evicted.
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2ad_mtx on each vdev for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
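
/*
 * Illustrative sketch of the ordering rules above (not an actual code
 * path in this file): when a hash lock must be taken while an arc list
 * lock is already held, only a non-blocking attempt is safe.  Assuming
 * a header "hdr" whose list lock is already held:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		... skip this header rather than block and risk deadlock ...
 *	}
 */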

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/zpl.h>
#include <linux/mm_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/dmu_tx.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_arc.h>

#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif

static kmutex_t arc_reclaim_lock;
static kcondvar_t arc_reclaim_thread_cv;
static boolean_t arc_reclaim_thread_exit;
static kcondvar_t arc_reclaim_waiters_cv;

static kmutex_t arc_user_evicts_lock;
static kcondvar_t arc_user_evicts_cv;
static boolean_t arc_user_evicts_thread_exit;

/*
 * The number of headers to evict in arc_evict_state_impl() before
 * dropping the sublist lock and evicting from another sublist. A lower
 * value means we're more likely to evict the "correct" header (i.e. the
 * oldest header in the arc state), but comes with higher overhead
 * (i.e. more invocations of arc_evict_state_impl()).
 */
int zfs_arc_evict_batch_limit = 10;

/*
 * The number of sublists used for each of the arc state lists. If this
 * is not set to a suitable value by the user, it will be configured to
 * the number of CPUs on the system in arc_init().
 */
int zfs_arc_num_sublists_per_state = 0;

/* number of seconds before growing cache again */
static int arc_grow_retry = 5;

/* shift of arc_c for calculating overflow limit in arc_get_data_buf */
int zfs_arc_overflow_shift = 8;

/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;

/*
 * log2(fraction of ARC which must be free to allow growing).
 * I.e. if there is less than arc_c >> arc_no_grow_shift free memory,
 * when reading a new block into the ARC, we will evict an equal-sized block
 * from the ARC.
 *
 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
 * we will still not allow it to grow.
 */
int arc_no_grow_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */

/*
 * These tunables are Linux specific.
 */
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_lifespan = 0;
int zfs_arc_p_aggressive_disable = 1;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	/*
	 * Number of times arc_evict_state() was unable to evict enough
	 * buffers to reach its target amount.
	 */
	kstat_named_t arcstat_evict_not_enough;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_evict_l2_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	/*
	 * Number of bytes consumed by internal ARC structures necessary
	 * for tracking purposes; these structures are not actually
	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
	 * caches), and arc_buf_t structures (allocated via arc_buf_t
	 * cache).
	 */
	kstat_named_t arcstat_hdr_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
	 * on disk user data (e.g. plain file contents).
	 */
	kstat_named_t arcstat_data_size;
	/*
	 * Number of bytes consumed by ARC buffers of type equal to
	 * ARC_BUFC_METADATA. This is generally consumed by buffers
	 * backing on disk data that is used for internal ZFS
	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
	 */
	kstat_named_t arcstat_metadata_size;
	/*
	 * Number of bytes consumed by various buffers and structures
	 * not actually backed with ARC buffers. This includes bonus
	 * buffers (allocated directly via zio_buf_* functions),
	 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
	 * cache), and dnode_t structures (allocated via dnode_t cache).
	 */
	kstat_named_t arcstat_other_size;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_anon state. This includes *all* buffers in the arc_anon
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_anon_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_anon state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_anon_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mru state. This includes *all* buffers in the arc_mru
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mru_size;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_DATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that meet the
	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
	 * residing in the arc_mru state, and are eligible for eviction
	 * (e.g. have no outstanding holds on the buffer).
	 */
	kstat_named_t arcstat_mru_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mru_ghost state. The key thing to note
	 * here is that this size doesn't actually indicate RAM
	 * consumption. The ghost lists only consist of headers and
	 * don't actually have ARC buffers linked off of these headers.
	 * Thus, *if* the headers had associated ARC buffers, these
	 * buffers *would have* consumed this number of bytes.
	 */
	kstat_named_t arcstat_mru_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
	 */
	kstat_named_t arcstat_mru_ghost_evictable_metadata;
	/*
	 * Total number of bytes consumed by ARC buffers residing in the
	 * arc_mfu state. This includes *all* buffers in the arc_mfu
	 * state; e.g. data, metadata, evictable, and unevictable buffers
	 * are all included in this value.
	 */
	kstat_named_t arcstat_mfu_size;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
	 * state.
	 */
	kstat_named_t arcstat_mfu_evictable_data;
	/*
	 * Number of bytes consumed by ARC buffers that are eligible for
	 * eviction, of type ARC_BUFC_METADATA, and reside in the
	 * arc_mfu state.
	 */
	kstat_named_t arcstat_mfu_evictable_metadata;
	/*
	 * Total number of bytes that *would have been* consumed by ARC
	 * buffers in the arc_mfu_ghost state. See the comment above
	 * arcstat_mru_ghost_size for more details.
	 */
	kstat_named_t arcstat_mfu_ghost_size;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_data;
	/*
	 * Number of bytes that *would have been* consumed by ARC
	 * buffers that are eligible for eviction, of type
	 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
	 */
	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_lock_retry;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_evict_l1cached;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_cdata_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_asize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_compress_successes;
	kstat_named_t arcstat_l2_compress_zeros;
	kstat_named_t arcstat_l2_compress_failures;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
	kstat_named_t arcstat_memory_direct_count;
	kstat_named_t arcstat_memory_indirect_count;
	kstat_named_t arcstat_no_grow;
	kstat_named_t arcstat_tempreserve;
	kstat_named_t arcstat_loaned_bytes;
	kstat_named_t arcstat_prune;
	kstat_named_t arcstat_meta_used;
	kstat_named_t arcstat_meta_limit;
	kstat_named_t arcstat_meta_max;
	kstat_named_t arcstat_meta_min;
	kstat_named_t arcstat_need_free;
	kstat_named_t arcstat_sys_free;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_not_enough",		KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "evict_l2_skip",		KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "metadata_size",		KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "anon_size",			KSTAT_DATA_UINT64 },
	{ "anon_evictable_data",	KSTAT_DATA_UINT64 },
	{ "anon_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_size",			KSTAT_DATA_UINT64 },
	{ "mru_evictable_data",		KSTAT_DATA_UINT64 },
	{ "mru_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
	{ "mfu_size",			KSTAT_DATA_UINT64 },
	{ "mfu_evictable_data",		KSTAT_DATA_UINT64 },
	{ "mfu_evictable_metadata",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_size",		KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_data",	KSTAT_DATA_UINT64 },
	{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_evict_l1cached",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_cdata_free_on_write",	KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_asize",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "l2_compress_successes",	KSTAT_DATA_UINT64 },
	{ "l2_compress_zeros",		KSTAT_DATA_UINT64 },
	{ "l2_compress_failures",	KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
	{ "duplicate_reads",		KSTAT_DATA_UINT64 },
	{ "memory_direct_count",	KSTAT_DATA_UINT64 },
	{ "memory_indirect_count",	KSTAT_DATA_UINT64 },
	{ "arc_no_grow",		KSTAT_DATA_UINT64 },
	{ "arc_tempreserve",		KSTAT_DATA_UINT64 },
	{ "arc_loaned_bytes",		KSTAT_DATA_UINT64 },
	{ "arc_prune",			KSTAT_DATA_UINT64 },
	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
	{ "arc_meta_min",		KSTAT_DATA_UINT64 },
	{ "arc_need_free",		KSTAT_DATA_UINT64 },
	{ "arc_sys_free",		KSTAT_DATA_UINT64 }
};

#define ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

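/*
 * ARCSTAT_MAX is a lock-free "store max": it retries the compare-and-swap
 * until either the stat already holds a value >= val or the swap succeeds,
 * so a larger maximum can never be lost to a concurrent update.
 */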
#define ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

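/*
 * A representative use of ARCSTAT_CONDSTAT (a sketch only; the real call
 * sites appear later in this file) classifies a hit by demand-vs-prefetch
 * and data-vs-metadata, bumping exactly one of the four
 * arcstat_{demand,prefetch}_{data,metadata}_hits counters:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 */
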
kstat_t *arc_ksp;
static arc_state_t *arc_anon;
static arc_state_t *arc_mru;
static arc_state_t *arc_mru_ghost;
static arc_state_t *arc_mfu;
static arc_state_t *arc_mfu_ghost;
static arc_state_t *arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
#define arc_no_grow	ARCSTAT(arcstat_no_grow)
#define arc_tempreserve	ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes	ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
#define arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
#define arc_need_free	ARCSTAT(arcstat_need_free) /* bytes to be freed */
#define arc_sys_free	ARCSTAT(arcstat_sys_free) /* target system free bytes */

#define L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)

static list_t arc_prune_list;
static kmutex_t arc_prune_mtx;
static taskq_t *arc_prune_taskq;
static arc_buf_t *arc_eviction_list;
static arc_buf_hdr_t arc_eviction_hdr;

#define GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

#define HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
#define HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)

#define HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2COMPRESS(hdr)	((hdr)->b_flags & ARC_FLAG_L2COMPRESS)
#define HDR_L2_READING(hdr)	\
	(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)

#define HDR_ISTYPE_METADATA(hdr) \
	((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))

#define HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)

/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET	24
#define HDR_COMPRESS_NBITS	7

#define HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET(hdr->b_flags, \
	HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS))
#define HDR_SET_COMPRESS(hdr, cmp)	BF32_SET(hdr->b_flags, \
	HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS, (cmp))

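/*
 * Note on the two macros above: they pack the enum zio_compress value into
 * bits 24..30 of b_flags (per HDR_COMPRESS_OFFSET and HDR_COMPRESS_NBITS),
 * so a header's compression mode travels with its flag word instead of
 * occupying a separate field.
 */
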
/*
 * Other sizes
 */

#define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))

/*
 * Hash table routines
 */

#define HT_LOCK_ALIGN	64
#define HT_LOCK_PAD	(P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[HT_LOCK_PAD];
#endif
};
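
/*
 * Each ht_lock is padded out to HT_LOCK_ALIGN bytes, presumably so that
 * adjacent locks in the ht_locks array land on separate cache lines and
 * do not false-share under concurrent hash table traffic.
 */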

#define BUF_LOCKS 8192
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define L2ARC_HEADROOM		2			/* num of writes */
/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define L2ARC_HEADROOM_BOOST	200
#define L2ARC_FEED_SECS		1		/* caching interval secs */
#define L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

/*
 * Used to distinguish headers that are being processed by
 * l2arc_write_buffers(), but have yet to be assigned to an l2arc disk
 * address. This can happen when the header is added to the l2arc's list
 * of buffers to write in the first stage of l2arc_write_buffers(), but
 * has not yet been written out, which happens in the second stage of
 * l2arc_write_buffers().
 */
#define L2ARC_ADDR_UNSET	((uint64_t)(-1))

#define l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE;	/* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM;		/* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval msecs */
int l2arc_noprefetch = B_TRUE;			/* don't cache prefetch bufs */
int l2arc_nocompress = B_FALSE;			/* don't compress bufs */
int l2arc_feed_again = B_TRUE;			/* turbo warmup */
int l2arc_norw = B_FALSE;			/* no reads during writes */

/*
 * L2ARC Internals
 */
static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t		*l2rcb_buf;		/* read buffer */
	spa_t			*l2rcb_spa;		/* spa */
	blkptr_t		l2rcb_bp;		/* original blkptr */
	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
	int			l2rcb_flags;		/* original flags */
	enum zio_compress	l2rcb_compress;		/* applied compress */
} l2arc_read_callback_t;

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void arc_get_data_buf(arc_buf_t *);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing(void);
static void arc_buf_watch(arc_buf_t *);
static void arc_tuning_update(void);

static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);

static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);

static boolean_t l2arc_compress_buf(arc_buf_hdr_t *);
static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
static void l2arc_release_cdata_buf(arc_buf_hdr_t *);

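/*
 * Hash a (spa, DVA, birth) tuple: fold each byte of the DVA through the
 * CRC64 table, then mix in the spa identifier and birth txg.  Callers
 * mask the result with ht_mask (see BUF_HASH_INDEX) to pick a bucket.
 */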
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0)

#define BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
	const dva_t *dva = BP_IDENTITY(bp);
	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *hdr;

	mutex_enter(hash_lock);
	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
	    hdr = hdr->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, hdr)) {
			*lockp = hash_lock;
			return (hdr);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

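/*
 * Illustrative caller sketch for buf_hash_find() (not a verbatim call
 * site; "guid" and "bp" are placeholders): on success the hash lock is
 * returned held and must be dropped by the caller.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... fields protected by the hash lock may be used ...
 *		mutex_exit(hash_lock);
 *	}
 */
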
/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 * If lockp == NULL, the caller is assumed to already hold the hash lock.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fhdr;
	uint32_t i;

	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
	ASSERT(hdr->b_birth != 0);
	ASSERT(!HDR_IN_HASH_TABLE(hdr));

	if (lockp != NULL) {
		*lockp = hash_lock;
		mutex_enter(hash_lock);
	} else {
		ASSERT(MUTEX_HELD(hash_lock));
	}

	for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
	    fhdr = fhdr->b_hash_next, i++) {
		if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
			return (fhdr);
	}

	hdr->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = hdr;
	hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
	arc_buf_hdr_t *fhdr, **hdrp;
	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(hdr));

	hdrp = &buf_hash_table.ht_table[idx];
	while ((fhdr = *hdrp) != hdr) {
		ASSERT(fhdr != NULL);
		hdrp = &fhdr->b_hash_next;
	}
	*hdrp = hdr->b_hash_next;
	hdr->b_hash_next = NULL;
	hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_full_cache);
	kmem_cache_destroy(hdr_l2only_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *hdr = vbuf;

	bzero(hdr, HDR_FULL_SIZE);
	cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
	refcount_create(&hdr->b_l1hdr.b_refcnt);
	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	list_link_init(&hdr->b_l1hdr.b_arc_node);
	list_link_init(&hdr->b_l2hdr.b_l2node);
	multilist_link_init(&hdr->b_l1hdr.b_arc_node);
	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *hdr = vbuf;

	bzero(hdr, HDR_L2ONLY_SIZE);
	arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *hdr = vbuf;

	ASSERT(BUF_EMPTY(hdr));
	cv_destroy(&hdr->b_l1hdr.b_cv);
	refcount_destroy(&hdr->b_l1hdr.b_refcnt);
	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
	ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
	ASSERTV(arc_buf_hdr_t *hdr = vbuf);

	ASSERT(BUF_EMPTY(hdr));
	arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thread_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	buf_hash_table.ht_table =
	    vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
	    0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
	hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
	    HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
	    NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

/*
 * Transition between the two allocation states for the arc_buf_hdr struct.
 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
 * version is used when a cache buffer is only in the L2ARC in order to reduce
 * memory usage.
 */
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
	arc_buf_hdr_t *nhdr;
	l2arc_dev_t *dev;

	ASSERT(HDR_HAS_L2HDR(hdr));
	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
	    (old == hdr_l2only_cache && new == hdr_full_cache));

	dev = hdr->b_l2hdr.b_dev;
	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);

	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
	buf_hash_remove(hdr);

	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);

	if (new == hdr_full_cache) {
		nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
		/*
		 * arc_access and arc_change_state need to be aware that a
		 * header has just come out of L2ARC, so we set its state to
		 * l2c_only even though it's about to change.
		 */
		nhdr->b_l1hdr.b_state = arc_l2c_only;

		/* Verify previous threads set to NULL before freeing */
		ASSERT3P(nhdr->b_l1hdr.b_tmp_cdata, ==, NULL);
	} else {
		ASSERT(hdr->b_l1hdr.b_buf == NULL);
		ASSERT0(hdr->b_l1hdr.b_datacnt);

		/*
		 * If we've reached here, we must have been called from
		 * arc_evict_hdr(); as such, we should have already been
		 * removed from any ghost list we were previously on
		 * (which protects us from racing with arc_evict_state),
		 * thus no locking is needed during this check.
		 */
		ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));

		/*
		 * A buffer must not be moved into the arc_l2c_only
		 * state if it's not finished being written out to the
		 * l2arc device. Otherwise, the b_l1hdr.b_tmp_cdata field
		 * might try to be accessed, even though it was removed.
		 */
		VERIFY(!HDR_L2_WRITING(hdr));
		VERIFY3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);

		nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR;
	}
	/*
	 * The header has been reallocated so we need to re-insert it into any
	 * lists it was on.
	 */
	(void) buf_hash_insert(nhdr, NULL);

	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * We must place the realloc'ed header back into the list at
	 * the same spot. Otherwise, if it's placed earlier in the list,
	 * l2arc_write_buffers() could find it during the function's
	 * write phase, and try to write it out to the l2arc.
	 */
	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
	list_remove(&dev->l2ad_buflist, hdr);

	mutex_exit(&dev->l2ad_mtx);

	/*
	 * Since we're using the pointer address as the tag when
	 * incrementing and decrementing the l2ad_alloc refcount, we
	 * must remove the old pointer (that we're about to destroy) and
	 * add the new pointer to the refcount. Otherwise we'd remove
	 * the wrong pointer address when calling arc_hdr_destroy() later.
	 */

	(void) refcount_remove_many(&dev->l2ad_alloc,
	    hdr->b_l2hdr.b_asize, hdr);

	(void) refcount_add_many(&dev->l2ad_alloc,
	    nhdr->b_l2hdr.b_asize, nhdr);

	buf_discard_identity(hdr);
	hdr->b_freeze_cksum = NULL;
	kmem_cache_free(old, hdr);

	return (nhdr);
}

#define ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(buf->b_hdr)) {
		mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
}

1256 | static int | |
1257 | arc_cksum_equal(arc_buf_t *buf) | |
1258 | { | |
1259 | zio_cksum_t zc; | |
1260 | int equal; | |
1261 | ||
b9541d6b | 1262 | mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1263 | fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); |
1264 | equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); | |
b9541d6b | 1265 | mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1266 | |
1267 | return (equal); | |
1268 | } | |
1269 | ||
1270 | static void | |
1271 | arc_cksum_compute(arc_buf_t *buf, boolean_t force) | |
1272 | { | |
1273 | if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) | |
1274 | return; | |
1275 | ||
b9541d6b | 1276 | mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f | 1277 | if (buf->b_hdr->b_freeze_cksum != NULL) { |
b9541d6b | 1278 | mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1279 | return; |
1280 | } | |
96c080cb | 1281 | buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); |
34dc7c2f BB |
1282 | fletcher_2_native(buf->b_data, buf->b_hdr->b_size, |
1283 | buf->b_hdr->b_freeze_cksum); | |
b9541d6b | 1284 | mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
498877ba MA |
1285 | arc_buf_watch(buf); |
1286 | } | |
1287 | ||
1288 | #ifndef _KERNEL | |
1289 | void | |
1290 | arc_buf_sigsegv(int sig, siginfo_t *si, void *unused) | |
1291 | { | |
1292 | panic("Got SIGSEGV at address: 0x%lx\n", (long) si->si_addr); | |
1293 | } | |
1294 | #endif | |
1295 | ||
1296 | /* ARGSUSED */ | |
1297 | static void | |
1298 | arc_buf_unwatch(arc_buf_t *buf) | |
1299 | { | |
1300 | #ifndef _KERNEL | |
1301 | if (arc_watch) { | |
1302 | ASSERT0(mprotect(buf->b_data, buf->b_hdr->b_size, | |
1303 | PROT_READ | PROT_WRITE)); | |
1304 | } | |
1305 | #endif | |
1306 | } | |
1307 | ||
1308 | /* ARGSUSED */ | |
1309 | static void | |
1310 | arc_buf_watch(arc_buf_t *buf) | |
1311 | { | |
1312 | #ifndef _KERNEL | |
1313 | if (arc_watch) | |
1314 | ASSERT0(mprotect(buf->b_data, buf->b_hdr->b_size, PROT_READ)); | |
1315 | #endif | |
34dc7c2f BB |
1316 | } |
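
/*
 * A minimal userland sketch (illustrative; not part of the original
 * source) of how the watch machinery above is wired up. The SIGSEGV
 * handler registration shown here is an assumption for illustration;
 * the real hookup lives in the consumer (e.g. ztest).
 */
#if 0	/* illustrative only */
static void
arc_watch_setup_example(void)
{
	struct sigaction sa;

	bzero(&sa, sizeof (sa));
	sa.sa_flags = SA_SIGINFO;
	sa.sa_sigaction = arc_buf_sigsegv;
	VERIFY0(sigaction(SIGSEGV, &sa, NULL));

	/* every arc_cksum_compute() now mprotects frozen buffers */
	arc_watch = B_TRUE;
}
#endif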
1317 | ||
b9541d6b CW |
1318 | static arc_buf_contents_t |
1319 | arc_buf_type(arc_buf_hdr_t *hdr) | |
1320 | { | |
1321 | if (HDR_ISTYPE_METADATA(hdr)) { | |
1322 | return (ARC_BUFC_METADATA); | |
1323 | } else { | |
1324 | return (ARC_BUFC_DATA); | |
1325 | } | |
1326 | } | |
1327 | ||
1328 | static uint32_t | |
1329 | arc_bufc_to_flags(arc_buf_contents_t type) | |
1330 | { | |
1331 | switch (type) { | |
1332 | case ARC_BUFC_DATA: | |
1333 | /* metadata field is 0 if buffer contains normal data */ | |
1334 | return (0); | |
1335 | case ARC_BUFC_METADATA: | |
1336 | return (ARC_FLAG_BUFC_METADATA); | |
1337 | default: | |
1338 | break; | |
1339 | } | |
1340 | panic("undefined ARC buffer type!"); | |
1341 | return ((uint32_t)-1); | |
1342 | } | |
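
/*
 * For symmetry, a minimal sketch (not in the original source) of the
 * inverse mapping; it assumes the ARC_FLAG_BUFC_METADATA bit is the only
 * thing distinguishing the two types, which is what arc_buf_type()
 * checks via HDR_ISTYPE_METADATA().
 */
#if 0	/* illustrative only */
static arc_buf_contents_t
arc_flags_to_bufc(uint32_t flags)
{
	return ((flags & ARC_FLAG_BUFC_METADATA) ?
	    ARC_BUFC_METADATA : ARC_BUFC_DATA);
}
#endif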
1343 | ||
34dc7c2f BB |
1344 | void |
1345 | arc_buf_thaw(arc_buf_t *buf) | |
1346 | { | |
1347 | if (zfs_flags & ZFS_DEBUG_MODIFY) { | |
b9541d6b | 1348 | if (buf->b_hdr->b_l1hdr.b_state != arc_anon) |
34dc7c2f | 1349 | panic("modifying non-anon buffer!"); |
b9541d6b | 1350 | if (HDR_IO_IN_PROGRESS(buf->b_hdr)) |
34dc7c2f BB |
1351 | panic("modifying buffer while i/o in progress!"); |
1352 | arc_cksum_verify(buf); | |
1353 | } | |
1354 | ||
b9541d6b | 1355 | mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
1356 | if (buf->b_hdr->b_freeze_cksum != NULL) { |
1357 | kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
1358 | buf->b_hdr->b_freeze_cksum = NULL; | |
1359 | } | |
428870ff | 1360 | |
b9541d6b | 1361 | mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock); |
498877ba MA |
1362 | |
1363 | arc_buf_unwatch(buf); | |
34dc7c2f BB |
1364 | } |
1365 | ||
1366 | void | |
1367 | arc_buf_freeze(arc_buf_t *buf) | |
1368 | { | |
428870ff BB |
1369 | kmutex_t *hash_lock; |
1370 | ||
34dc7c2f BB |
1371 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) |
1372 | return; | |
1373 | ||
428870ff BB |
1374 | hash_lock = HDR_LOCK(buf->b_hdr); |
1375 | mutex_enter(hash_lock); | |
1376 | ||
34dc7c2f | 1377 | ASSERT(buf->b_hdr->b_freeze_cksum != NULL || |
b9541d6b | 1378 | buf->b_hdr->b_l1hdr.b_state == arc_anon); |
34dc7c2f | 1379 | arc_cksum_compute(buf, B_FALSE); |
428870ff | 1380 | mutex_exit(hash_lock); |
498877ba | 1381 | |
34dc7c2f BB |
1382 | } |
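
/*
 * A minimal sketch (illustrative only) of the thaw/freeze protocol the
 * two functions above implement for an anonymous buffer: with
 * ZFS_DEBUG_MODIFY set, a writer thaws before modifying and freezes
 * afterwards, so arc_cksum_verify() can panic on any unannounced write.
 */
#if 0	/* illustrative only */
static void
arc_modify_example(arc_buf_t *buf, const void *src, uint64_t len)
{
	arc_buf_thaw(buf);		/* discard the frozen checksum */
	bcopy(src, buf->b_data, len);	/* the legitimate modification */
	arc_buf_freeze(buf);		/* re-checksum the new contents */
}
#endif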
1383 | ||
1384 | static void | |
2a432414 | 1385 | add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) |
34dc7c2f | 1386 | { |
b9541d6b CW |
1387 | arc_state_t *state; |
1388 | ||
1389 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
34dc7c2f BB |
1390 | ASSERT(MUTEX_HELD(hash_lock)); |
1391 | ||
b9541d6b CW |
1392 | state = hdr->b_l1hdr.b_state; |
1393 | ||
1394 | if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && | |
1395 | (state != arc_anon)) { | |
1396 | /* We don't use the L2-only state list. */ | |
1397 | if (state != arc_l2c_only) { | |
ca0bf58d | 1398 | arc_buf_contents_t type = arc_buf_type(hdr); |
b9541d6b | 1399 | uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt; |
ca0bf58d PS |
1400 | multilist_t *list = &state->arcs_list[type]; |
1401 | uint64_t *size = &state->arcs_lsize[type]; | |
1402 | ||
1403 | multilist_remove(list, hdr); | |
b9541d6b | 1404 | |
b9541d6b CW |
1405 | if (GHOST_STATE(state)) { |
1406 | ASSERT0(hdr->b_l1hdr.b_datacnt); | |
1407 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); | |
1408 | delta = hdr->b_size; | |
1409 | } | |
1410 | ASSERT(delta > 0); | |
1411 | ASSERT3U(*size, >=, delta); | |
1412 | atomic_add_64(size, -delta); | |
34dc7c2f | 1413 | } |
b128c09f | 1414 | /* remove the prefetch flag if we get a reference */ |
b9541d6b | 1415 | hdr->b_flags &= ~ARC_FLAG_PREFETCH; |
34dc7c2f BB |
1416 | } |
1417 | } | |
1418 | ||
1419 | static int | |
2a432414 | 1420 | remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) |
34dc7c2f BB |
1421 | { |
1422 | int cnt; | |
b9541d6b | 1423 | arc_state_t *state = hdr->b_l1hdr.b_state; |
34dc7c2f | 1424 | |
b9541d6b | 1425 | ASSERT(HDR_HAS_L1HDR(hdr)); |
34dc7c2f BB |
1426 | ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); |
1427 | ASSERT(!GHOST_STATE(state)); | |
1428 | ||
b9541d6b CW |
1429 | /* |
1430 | * arc_l2c_only counts as a ghost state so we don't need to explicitly | |
1431 | * check to prevent usage of the arc_l2c_only list. | |
1432 | */ | |
1433 | if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && | |
34dc7c2f | 1434 | (state != arc_anon)) { |
ca0bf58d PS |
1435 | arc_buf_contents_t type = arc_buf_type(hdr); |
1436 | multilist_t *list = &state->arcs_list[type]; | |
1437 | uint64_t *size = &state->arcs_lsize[type]; | |
1438 | ||
1439 | multilist_insert(list, hdr); | |
34dc7c2f | 1440 | |
b9541d6b CW |
1441 | ASSERT(hdr->b_l1hdr.b_datacnt > 0); |
1442 | atomic_add_64(size, hdr->b_size * | |
1443 | hdr->b_l1hdr.b_datacnt); | |
34dc7c2f BB |
1444 | } |
1445 | return (cnt); | |
1446 | } | |
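
/*
 * A minimal sketch (not from the original source) of the tag discipline
 * used by add_reference()/remove_reference(): the same opaque tag must
 * be passed to both halves, which lets the refcount code detect
 * mismatched holds in debug builds.
 */
#if 0	/* illustrative only */
static void
arc_hold_example(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	void *tag = curthread;	/* any stable pointer serves as a tag */

	mutex_enter(hash_lock);
	add_reference(hdr, hash_lock, tag);	/* now un-evictable */
	/* ... use the header's buffers ... */
	(void) remove_reference(hdr, hash_lock, tag);
	mutex_exit(hash_lock);
}
#endif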
1447 | ||
e0b0ca98 BB |
1448 | /* |
1449 | * Returns detailed information about a specific arc buffer. When the | |
1450 | * state_index argument is set, the function will calculate the arc header's | |
1451 | * list position within its arc state. Since this requires a linear traversal, | |
1452 | * callers are strongly encouraged not to do this. However, it can be helpful | |
1453 | * for targeted analysis so the functionality is provided. | |
1454 | */ | |
1455 | void | |
1456 | arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index) | |
1457 | { | |
1458 | arc_buf_hdr_t *hdr = ab->b_hdr; | |
b9541d6b CW |
1459 | l1arc_buf_hdr_t *l1hdr = NULL; |
1460 | l2arc_buf_hdr_t *l2hdr = NULL; | |
1461 | arc_state_t *state = NULL; | |
1462 | ||
1463 | if (HDR_HAS_L1HDR(hdr)) { | |
1464 | l1hdr = &hdr->b_l1hdr; | |
1465 | state = l1hdr->b_state; | |
1466 | } | |
1467 | if (HDR_HAS_L2HDR(hdr)) | |
1468 | l2hdr = &hdr->b_l2hdr; | |
e0b0ca98 | 1469 | |
d1d7e268 | 1470 | memset(abi, 0, sizeof (arc_buf_info_t)); |
e0b0ca98 | 1471 | abi->abi_flags = hdr->b_flags; |
b9541d6b CW |
1472 | |
1473 | if (l1hdr) { | |
1474 | abi->abi_datacnt = l1hdr->b_datacnt; | |
1475 | abi->abi_access = l1hdr->b_arc_access; | |
1476 | abi->abi_mru_hits = l1hdr->b_mru_hits; | |
1477 | abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits; | |
1478 | abi->abi_mfu_hits = l1hdr->b_mfu_hits; | |
1479 | abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits; | |
1480 | abi->abi_holds = refcount_count(&l1hdr->b_refcnt); | |
1481 | } | |
1482 | ||
1483 | if (l2hdr) { | |
1484 | abi->abi_l2arc_dattr = l2hdr->b_daddr; | |
1485 | abi->abi_l2arc_asize = l2hdr->b_asize; | |
1486 | abi->abi_l2arc_compress = HDR_GET_COMPRESS(hdr); | |
1487 | abi->abi_l2arc_hits = l2hdr->b_hits; | |
1488 | } | |
1489 | ||
e0b0ca98 | 1490 | abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON; |
b9541d6b | 1491 | abi->abi_state_contents = arc_buf_type(hdr); |
e0b0ca98 | 1492 | abi->abi_size = hdr->b_size; |
e0b0ca98 BB |
1493 | } |
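
/*
 * A minimal usage sketch (illustrative): snapshot a buffer's ARC
 * metadata, passing 0 for state_index to skip the costly list-position
 * calculation discussed above.
 */
#if 0	/* illustrative only */
static void
arc_buf_info_example(arc_buf_t *buf)
{
	arc_buf_info_t abi;

	arc_buf_info(buf, &abi, 0);
	zfs_dbgmsg("size=%llu holds=%llu mru_hits=%u",
	    (u_longlong_t)abi.abi_size, (u_longlong_t)abi.abi_holds,
	    abi.abi_mru_hits);
}
#endif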
1494 | ||
34dc7c2f | 1495 | /* |
ca0bf58d | 1496 | * Move the supplied buffer to the indicated state. The hash lock |
34dc7c2f BB |
1497 | * for the buffer must be held by the caller. |
1498 | */ | |
1499 | static void | |
2a432414 GW |
1500 | arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, |
1501 | kmutex_t *hash_lock) | |
34dc7c2f | 1502 | { |
b9541d6b CW |
1503 | arc_state_t *old_state; |
1504 | int64_t refcnt; | |
1505 | uint32_t datacnt; | |
34dc7c2f | 1506 | uint64_t from_delta, to_delta; |
b9541d6b CW |
1507 | arc_buf_contents_t buftype = arc_buf_type(hdr); |
1508 | ||
1509 | /* | |
1510 | * We almost always have an L1 hdr here, since we call arc_hdr_realloc() | |
1511 | * in arc_read() when bringing a buffer out of the L2ARC. However, the | |
1512 | * L1 hdr doesn't always exist when we change state to arc_anon before | |
1513 | * destroying a header, in which case reallocating to add the L1 hdr is | |
1514 | * pointless. | |
1515 | */ | |
1516 | if (HDR_HAS_L1HDR(hdr)) { | |
1517 | old_state = hdr->b_l1hdr.b_state; | |
1518 | refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt); | |
1519 | datacnt = hdr->b_l1hdr.b_datacnt; | |
1520 | } else { | |
1521 | old_state = arc_l2c_only; | |
1522 | refcnt = 0; | |
1523 | datacnt = 0; | |
1524 | } | |
34dc7c2f BB |
1525 | |
1526 | ASSERT(MUTEX_HELD(hash_lock)); | |
e8b96c60 | 1527 | ASSERT3P(new_state, !=, old_state); |
b9541d6b CW |
1528 | ASSERT(refcnt == 0 || datacnt > 0); |
1529 | ASSERT(!GHOST_STATE(new_state) || datacnt == 0); | |
1530 | ASSERT(old_state != arc_anon || datacnt <= 1); | |
34dc7c2f | 1531 | |
b9541d6b | 1532 | from_delta = to_delta = datacnt * hdr->b_size; |
34dc7c2f BB |
1533 | |
1534 | /* | |
1535 | * If this buffer is evictable, transfer it from the | |
1536 | * old state list to the new state list. | |
1537 | */ | |
1538 | if (refcnt == 0) { | |
b9541d6b | 1539 | if (old_state != arc_anon && old_state != arc_l2c_only) { |
b9541d6b | 1540 | uint64_t *size = &old_state->arcs_lsize[buftype]; |
34dc7c2f | 1541 | |
b9541d6b | 1542 | ASSERT(HDR_HAS_L1HDR(hdr)); |
ca0bf58d | 1543 | multilist_remove(&old_state->arcs_list[buftype], hdr); |
34dc7c2f BB |
1544 | |
1545 | /* | |
1546 | * If prefetching out of the ghost cache, | |
428870ff | 1547 | * we will have a non-zero datacnt. |
34dc7c2f | 1548 | */ |
b9541d6b | 1549 | if (GHOST_STATE(old_state) && datacnt == 0) { |
34dc7c2f | 1550 | /* ghost elements have a ghost size */ |
b9541d6b | 1551 | ASSERT(hdr->b_l1hdr.b_buf == NULL); |
2a432414 | 1552 | from_delta = hdr->b_size; |
34dc7c2f BB |
1553 | } |
1554 | ASSERT3U(*size, >=, from_delta); | |
1555 | atomic_add_64(size, -from_delta); | |
34dc7c2f | 1556 | } |
b9541d6b | 1557 | if (new_state != arc_anon && new_state != arc_l2c_only) { |
b9541d6b | 1558 | uint64_t *size = &new_state->arcs_lsize[buftype]; |
34dc7c2f | 1559 | |
b9541d6b CW |
1560 | /* |
1561 | * An L1 header always exists here, since if we're | |
1562 | * moving to some L1-cached state (i.e. not l2c_only or | |
1563 | * anonymous), we realloc the header to add an L1hdr | |
1564 | * beforehand. | |
1565 | */ | |
1566 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
ca0bf58d | 1567 | multilist_insert(&new_state->arcs_list[buftype], hdr); |
34dc7c2f BB |
1568 | |
1569 | /* ghost elements have a ghost size */ | |
1570 | if (GHOST_STATE(new_state)) { | |
b9541d6b CW |
1571 | ASSERT0(datacnt); |
1572 | ASSERT(hdr->b_l1hdr.b_buf == NULL); | |
2a432414 | 1573 | to_delta = hdr->b_size; |
34dc7c2f BB |
1574 | } |
1575 | atomic_add_64(size, to_delta); | |
34dc7c2f BB |
1576 | } |
1577 | } | |
1578 | ||
2a432414 GW |
1579 | ASSERT(!BUF_EMPTY(hdr)); |
1580 | if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) | |
1581 | buf_hash_remove(hdr); | |
34dc7c2f | 1582 | |
b9541d6b | 1583 | /* adjust state sizes (ignore arc_l2c_only) */ |
36da08ef PS |
1584 | |
1585 | if (to_delta && new_state != arc_l2c_only) { | |
1586 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
1587 | if (GHOST_STATE(new_state)) { | |
1588 | ASSERT0(datacnt); | |
1589 | ||
1590 | /* | |
1591 | * When moving a header to a ghost state, we first | |
1592 | * remove all arc buffers. Thus, we'll have a | |
1593 | * datacnt of zero, and no arc buffer to use for | |
1594 | * the reference. As a result, we use the arc | |
1595 | * header pointer for the reference. | |
1596 | */ | |
1597 | (void) refcount_add_many(&new_state->arcs_size, | |
1598 | hdr->b_size, hdr); | |
1599 | } else { | |
1600 | arc_buf_t *buf; | |
1601 | ASSERT3U(datacnt, !=, 0); | |
1602 | ||
1603 | /* | |
1604 | * Each individual buffer holds a unique reference, | |
1605 | * thus we must remove each of these references one | |
1606 | * at a time. | |
1607 | */ | |
1608 | for (buf = hdr->b_l1hdr.b_buf; buf != NULL; | |
1609 | buf = buf->b_next) { | |
1610 | (void) refcount_add_many(&new_state->arcs_size, | |
1611 | hdr->b_size, buf); | |
1612 | } | |
1613 | } | |
1614 | } | |
1615 | ||
b9541d6b | 1616 | if (from_delta && old_state != arc_l2c_only) { |
36da08ef PS |
1617 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1618 | if (GHOST_STATE(old_state)) { | |
1619 | /* | |
1620 | * When moving a header off of a ghost state, | |
1621 | * there's the possibility for datacnt to be | |
1622 | * non-zero. This is because we first add the | |
1623 | * arc buffer to the header prior to changing | |
1624 | * the header's state. Since we used the header | |
1625 | * for the reference when putting the header on | |
1626 | * the ghost state, we must balance that and use | |
1627 | * the header when removing it from the ghost state | |
1628 | * (even though datacnt is non-zero). | |
1629 | */ | |
1630 | ||
1631 | IMPLY(datacnt == 0, new_state == arc_anon || | |
1632 | new_state == arc_l2c_only); | |
1633 | ||
1634 | (void) refcount_remove_many(&old_state->arcs_size, | |
1635 | hdr->b_size, hdr); | |
1636 | } else { | |
1637 | arc_buf_t *buf; | |
1638 | ASSERT3U(datacnt, !=, 0); | |
1639 | ||
1640 | /* | |
1641 | * Each individual buffer holds a unique reference, | |
1642 | * thus we must remove each of these references one | |
1643 | * at a time. | |
1644 | */ | |
1645 | for (buf = hdr->b_l1hdr.b_buf; buf != NULL; | |
1646 | buf = buf->b_next) { | |
1647 | (void) refcount_remove_many( | |
1648 | &old_state->arcs_size, hdr->b_size, buf); | |
1649 | } | |
1650 | } | |
34dc7c2f | 1651 | } |
36da08ef | 1652 | |
b9541d6b CW |
1653 | if (HDR_HAS_L1HDR(hdr)) |
1654 | hdr->b_l1hdr.b_state = new_state; | |
34dc7c2f | 1655 | |
b9541d6b CW |
1656 | /* |
1657 | * L2-only headers should never be on the arc_l2c_only state lists, since they don't | |
1658 | * have L1 headers allocated. | |
1659 | */ | |
ca0bf58d PS |
1660 | ASSERT(multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && |
1661 | multilist_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); | |
34dc7c2f BB |
1662 | } |
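
/*
 * A minimal sketch (not from the original source) of a typical caller:
 * arc_access() promotes a re-referenced MRU header to MFU with a call
 * of exactly this shape (the real logic also updates b_arc_access and
 * the hit statistics).
 */
#if 0	/* illustrative only */
static void
arc_promote_example(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(hash_lock));
	if (hdr->b_l1hdr.b_state == arc_mru)
		arc_change_state(arc_mfu, hdr, hash_lock);
}
#endif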
1663 | ||
1664 | void | |
d164b209 | 1665 | arc_space_consume(uint64_t space, arc_space_type_t type) |
34dc7c2f | 1666 | { |
d164b209 BB |
1667 | ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); |
1668 | ||
1669 | switch (type) { | |
e75c13c3 BB |
1670 | default: |
1671 | break; | |
d164b209 BB |
1672 | case ARC_SPACE_DATA: |
1673 | ARCSTAT_INCR(arcstat_data_size, space); | |
1674 | break; | |
cc7f677c | 1675 | case ARC_SPACE_META: |
500445c0 | 1676 | ARCSTAT_INCR(arcstat_metadata_size, space); |
cc7f677c | 1677 | break; |
d164b209 BB |
1678 | case ARC_SPACE_OTHER: |
1679 | ARCSTAT_INCR(arcstat_other_size, space); | |
1680 | break; | |
1681 | case ARC_SPACE_HDRS: | |
1682 | ARCSTAT_INCR(arcstat_hdr_size, space); | |
1683 | break; | |
1684 | case ARC_SPACE_L2HDRS: | |
1685 | ARCSTAT_INCR(arcstat_l2_hdr_size, space); | |
1686 | break; | |
1687 | } | |
1688 | ||
500445c0 | 1689 | if (type != ARC_SPACE_DATA) |
cc7f677c PS |
1690 | ARCSTAT_INCR(arcstat_meta_used, space); |
1691 | ||
34dc7c2f BB |
1692 | atomic_add_64(&arc_size, space); |
1693 | } | |
1694 | ||
1695 | void | |
d164b209 | 1696 | arc_space_return(uint64_t space, arc_space_type_t type) |
34dc7c2f | 1697 | { |
d164b209 BB |
1698 | ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); |
1699 | ||
1700 | switch (type) { | |
e75c13c3 BB |
1701 | default: |
1702 | break; | |
d164b209 BB |
1703 | case ARC_SPACE_DATA: |
1704 | ARCSTAT_INCR(arcstat_data_size, -space); | |
1705 | break; | |
cc7f677c | 1706 | case ARC_SPACE_META: |
500445c0 | 1707 | ARCSTAT_INCR(arcstat_metadata_size, -space); |
cc7f677c | 1708 | break; |
d164b209 BB |
1709 | case ARC_SPACE_OTHER: |
1710 | ARCSTAT_INCR(arcstat_other_size, -space); | |
1711 | break; | |
1712 | case ARC_SPACE_HDRS: | |
1713 | ARCSTAT_INCR(arcstat_hdr_size, -space); | |
1714 | break; | |
1715 | case ARC_SPACE_L2HDRS: | |
1716 | ARCSTAT_INCR(arcstat_l2_hdr_size, -space); | |
1717 | break; | |
1718 | } | |
1719 | ||
cc7f677c PS |
1720 | if (type != ARC_SPACE_DATA) { |
1721 | ASSERT(arc_meta_used >= space); | |
500445c0 PS |
1722 | if (arc_meta_max < arc_meta_used) |
1723 | arc_meta_max = arc_meta_used; | |
cc7f677c PS |
1724 | ARCSTAT_INCR(arcstat_meta_used, -space); |
1725 | } | |
1726 | ||
34dc7c2f BB |
1727 | ASSERT(arc_size >= space); |
1728 | atomic_add_64(&arc_size, -space); | |
1729 | } | |
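
/*
 * A minimal sketch (illustrative) of the accounting discipline above:
 * every arc_space_consume() must be balanced by an arc_space_return()
 * of the same size and type, or arc_size and the per-type kstats drift.
 */
#if 0	/* illustrative only */
static void
arc_space_example(void)
{
	void *p = kmem_alloc(1024, KM_SLEEP);

	arc_space_consume(1024, ARC_SPACE_OTHER);
	/* ... the allocation is now charged against the ARC ... */
	arc_space_return(1024, ARC_SPACE_OTHER);
	kmem_free(p, 1024);
}
#endif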
1730 | ||
34dc7c2f | 1731 | arc_buf_t * |
5f6d0b6f | 1732 | arc_buf_alloc(spa_t *spa, uint64_t size, void *tag, arc_buf_contents_t type) |
34dc7c2f BB |
1733 | { |
1734 | arc_buf_hdr_t *hdr; | |
1735 | arc_buf_t *buf; | |
1736 | ||
f1512ee6 | 1737 | VERIFY3U(size, <=, spa_maxblocksize(spa)); |
b9541d6b | 1738 | hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); |
34dc7c2f | 1739 | ASSERT(BUF_EMPTY(hdr)); |
b9541d6b | 1740 | ASSERT3P(hdr->b_freeze_cksum, ==, NULL); |
34dc7c2f | 1741 | hdr->b_size = size; |
3541dc6d | 1742 | hdr->b_spa = spa_load_guid(spa); |
b9541d6b CW |
1743 | hdr->b_l1hdr.b_mru_hits = 0; |
1744 | hdr->b_l1hdr.b_mru_ghost_hits = 0; | |
1745 | hdr->b_l1hdr.b_mfu_hits = 0; | |
1746 | hdr->b_l1hdr.b_mfu_ghost_hits = 0; | |
1747 | hdr->b_l1hdr.b_l2_hits = 0; | |
1748 | ||
34dc7c2f BB |
1749 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); |
1750 | buf->b_hdr = hdr; | |
1751 | buf->b_data = NULL; | |
1752 | buf->b_efunc = NULL; | |
1753 | buf->b_private = NULL; | |
1754 | buf->b_next = NULL; | |
b9541d6b CW |
1755 | |
1756 | hdr->b_flags = arc_bufc_to_flags(type); | |
1757 | hdr->b_flags |= ARC_FLAG_HAS_L1HDR; | |
1758 | ||
1759 | hdr->b_l1hdr.b_buf = buf; | |
1760 | hdr->b_l1hdr.b_state = arc_anon; | |
1761 | hdr->b_l1hdr.b_arc_access = 0; | |
1762 | hdr->b_l1hdr.b_datacnt = 1; | |
ca0bf58d | 1763 | hdr->b_l1hdr.b_tmp_cdata = NULL; |
b9541d6b | 1764 | |
34dc7c2f | 1765 | arc_get_data_buf(buf); |
b9541d6b CW |
1766 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
1767 | (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); | |
34dc7c2f BB |
1768 | |
1769 | return (buf); | |
1770 | } | |
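
/*
 * A minimal sketch (illustrative) of the allocate/free pairing: the
 * buffer starts out anonymous with a single hold owned by 'tag', and
 * arc_buf_free() with the same tag releases it.
 */
#if 0	/* illustrative only */
static void
arc_buf_alloc_example(spa_t *spa, void *tag)
{
	arc_buf_t *buf = arc_buf_alloc(spa, SPA_MINBLOCKSIZE, tag,
	    ARC_BUFC_DATA);

	bzero(buf->b_data, arc_buf_size(buf));
	arc_buf_free(buf, tag);
}
#endif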
1771 | ||
9babb374 BB |
1772 | static char *arc_onloan_tag = "onloan"; |
1773 | ||
1774 | /* | |
1775 | * Loan out an anonymous arc buffer. Loaned buffers are not counted as | |
1776 | * in-flight data by arc_tempreserve_space() until they are "returned". Loaned | |
1777 | * buffers must be returned to the arc before they can be used by the DMU or | |
1778 | * freed. | |
1779 | */ | |
1780 | arc_buf_t * | |
5f6d0b6f | 1781 | arc_loan_buf(spa_t *spa, uint64_t size) |
9babb374 BB |
1782 | { |
1783 | arc_buf_t *buf; | |
1784 | ||
1785 | buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); | |
1786 | ||
1787 | atomic_add_64(&arc_loaned_bytes, size); | |
1788 | return (buf); | |
1789 | } | |
1790 | ||
1791 | /* | |
1792 | * Return a loaned arc buffer to the arc. | |
1793 | */ | |
1794 | void | |
1795 | arc_return_buf(arc_buf_t *buf, void *tag) | |
1796 | { | |
1797 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
1798 | ||
9babb374 | 1799 | ASSERT(buf->b_data != NULL); |
b9541d6b CW |
1800 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1801 | (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); | |
1802 | (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); | |
9babb374 BB |
1803 | |
1804 | atomic_add_64(&arc_loaned_bytes, -hdr->b_size); | |
1805 | } | |
1806 | ||
428870ff BB |
1807 | /* Detach an arc_buf from a dbuf (tag) */ |
1808 | void | |
1809 | arc_loan_inuse_buf(arc_buf_t *buf, void *tag) | |
1810 | { | |
b9541d6b | 1811 | arc_buf_hdr_t *hdr = buf->b_hdr; |
428870ff BB |
1812 | |
1813 | ASSERT(buf->b_data != NULL); | |
b9541d6b CW |
1814 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1815 | (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); | |
1816 | (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); | |
428870ff BB |
1817 | buf->b_efunc = NULL; |
1818 | buf->b_private = NULL; | |
1819 | ||
1820 | atomic_add_64(&arc_loaned_bytes, hdr->b_size); | |
1821 | } | |
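
/*
 * A minimal sketch (illustrative) of the loan lifecycle implemented by
 * the functions above: a loaned buffer stays invisible to
 * arc_tempreserve_space() until arc_return_buf() transfers the hold
 * from arc_onloan_tag back to the caller.
 */
#if 0	/* illustrative only */
static void
arc_loan_example(spa_t *spa, void *tag)
{
	arc_buf_t *buf = arc_loan_buf(spa, SPA_MINBLOCKSIZE);

	/* ... fill buf->b_data (the DMU typically does this) ... */
	arc_return_buf(buf, tag);		/* hold now owned by 'tag' */
	(void) arc_buf_remove_ref(buf, tag);	/* release when done */
}
#endif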
1822 | ||
34dc7c2f BB |
1823 | static arc_buf_t * |
1824 | arc_buf_clone(arc_buf_t *from) | |
1825 | { | |
1826 | arc_buf_t *buf; | |
1827 | arc_buf_hdr_t *hdr = from->b_hdr; | |
1828 | uint64_t size = hdr->b_size; | |
1829 | ||
b9541d6b CW |
1830 | ASSERT(HDR_HAS_L1HDR(hdr)); |
1831 | ASSERT(hdr->b_l1hdr.b_state != arc_anon); | |
428870ff | 1832 | |
34dc7c2f BB |
1833 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); |
1834 | buf->b_hdr = hdr; | |
1835 | buf->b_data = NULL; | |
1836 | buf->b_efunc = NULL; | |
1837 | buf->b_private = NULL; | |
b9541d6b CW |
1838 | buf->b_next = hdr->b_l1hdr.b_buf; |
1839 | hdr->b_l1hdr.b_buf = buf; | |
34dc7c2f BB |
1840 | arc_get_data_buf(buf); |
1841 | bcopy(from->b_data, buf->b_data, size); | |
1eb5bfa3 GW |
1842 | |
1843 | /* | |
1844 | * This buffer already exists in the arc so create a duplicate | |
1845 | * copy for the caller. If the buffer is associated with user data | |
1846 | * then track the size and number of duplicates. These stats will be | |
1847 | * updated as duplicate buffers are created and destroyed. | |
1848 | */ | |
b9541d6b | 1849 | if (HDR_ISTYPE_DATA(hdr)) { |
1eb5bfa3 GW |
1850 | ARCSTAT_BUMP(arcstat_duplicate_buffers); |
1851 | ARCSTAT_INCR(arcstat_duplicate_buffers_size, size); | |
1852 | } | |
b9541d6b | 1853 | hdr->b_l1hdr.b_datacnt += 1; |
34dc7c2f BB |
1854 | return (buf); |
1855 | } | |
1856 | ||
1857 | void | |
1858 | arc_buf_add_ref(arc_buf_t *buf, void* tag) | |
1859 | { | |
1860 | arc_buf_hdr_t *hdr; | |
1861 | kmutex_t *hash_lock; | |
1862 | ||
1863 | /* | |
b128c09f BB |
1864 | * Check to see if this buffer is evicted. Callers |
1865 | * must verify b_data != NULL to know if the add_ref | |
1866 | * was successful. | |
34dc7c2f | 1867 | */ |
428870ff | 1868 | mutex_enter(&buf->b_evict_lock); |
b128c09f | 1869 | if (buf->b_data == NULL) { |
428870ff | 1870 | mutex_exit(&buf->b_evict_lock); |
34dc7c2f BB |
1871 | return; |
1872 | } | |
428870ff | 1873 | hash_lock = HDR_LOCK(buf->b_hdr); |
34dc7c2f | 1874 | mutex_enter(hash_lock); |
428870ff | 1875 | hdr = buf->b_hdr; |
b9541d6b | 1876 | ASSERT(HDR_HAS_L1HDR(hdr)); |
428870ff BB |
1877 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); |
1878 | mutex_exit(&buf->b_evict_lock); | |
34dc7c2f | 1879 | |
b9541d6b CW |
1880 | ASSERT(hdr->b_l1hdr.b_state == arc_mru || |
1881 | hdr->b_l1hdr.b_state == arc_mfu); | |
1882 | ||
34dc7c2f | 1883 | add_reference(hdr, hash_lock, tag); |
d164b209 | 1884 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); |
34dc7c2f BB |
1885 | arc_access(hdr, hash_lock); |
1886 | mutex_exit(hash_lock); | |
1887 | ARCSTAT_BUMP(arcstat_hits); | |
b9541d6b CW |
1888 | ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), |
1889 | demand, prefetch, !HDR_ISTYPE_METADATA(hdr), | |
34dc7c2f BB |
1890 | data, metadata, hits); |
1891 | } | |
1892 | ||
ca0bf58d PS |
1893 | static void |
1894 | arc_buf_free_on_write(void *data, size_t size, | |
1895 | void (*free_func)(void *, size_t)) | |
1896 | { | |
1897 | l2arc_data_free_t *df; | |
1898 | ||
1899 | df = kmem_alloc(sizeof (*df), KM_SLEEP); | |
1900 | df->l2df_data = data; | |
1901 | df->l2df_size = size; | |
1902 | df->l2df_func = free_func; | |
1903 | mutex_enter(&l2arc_free_on_write_mtx); | |
1904 | list_insert_head(l2arc_free_on_write, df); | |
1905 | mutex_exit(&l2arc_free_on_write_mtx); | |
1906 | } | |
1907 | ||
34dc7c2f BB |
1908 | /* |
1909 | * Free the arc data buffer. If it is an l2arc write in progress, | |
1910 | * the buffer is placed on l2arc_free_on_write to be freed later. | |
1911 | */ | |
1912 | static void | |
498877ba | 1913 | arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t)) |
34dc7c2f | 1914 | { |
498877ba MA |
1915 | arc_buf_hdr_t *hdr = buf->b_hdr; |
1916 | ||
34dc7c2f | 1917 | if (HDR_L2_WRITING(hdr)) { |
ca0bf58d | 1918 | arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func); |
34dc7c2f BB |
1919 | ARCSTAT_BUMP(arcstat_l2_free_on_write); |
1920 | } else { | |
498877ba | 1921 | free_func(buf->b_data, hdr->b_size); |
34dc7c2f BB |
1922 | } |
1923 | } | |
1924 | ||
ca0bf58d PS |
1925 | static void |
1926 | arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr) | |
1927 | { | |
1928 | ASSERT(HDR_HAS_L2HDR(hdr)); | |
1929 | ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx)); | |
1930 | ||
1931 | /* | |
1932 | * The b_tmp_cdata field is linked off of the b_l1hdr, so if | |
1933 | * that doesn't exist, the header is in the arc_l2c_only state, | |
1934 | * and there isn't anything to free (it's already been freed). | |
1935 | */ | |
1936 | if (!HDR_HAS_L1HDR(hdr)) | |
1937 | return; | |
1938 | ||
1939 | /* | |
1940 | * The header isn't being written to the l2arc device, thus it | |
1941 | * shouldn't have a b_tmp_cdata to free. | |
1942 | */ | |
1943 | if (!HDR_L2_WRITING(hdr)) { | |
1944 | ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); | |
1945 | return; | |
1946 | } | |
1947 | ||
1948 | /* | |
1949 | * The header does not have compression enabled. This can be due | |
1950 | * to the buffer not being compressible, or because we're | |
1951 | * freeing the buffer before the second phase of | |
1952 | * l2arc_write_buffers() has started (which does the compression | |
1953 | * step). In either case, b_tmp_cdata does not point to a | |
1954 | * separately compressed buffer, so there's nothing to free (it | |
1955 | * points to the same buffer as the arc_buf_t's b_data field). | |
1956 | */ | |
1957 | if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) { | |
1958 | hdr->b_l1hdr.b_tmp_cdata = NULL; | |
1959 | return; | |
1960 | } | |
1961 | ||
1962 | /* | |
1963 | * There's nothing to free since the buffer was all zeros and | |
1964 | * compressed to a zero-length buffer. | |
1965 | */ | |
1966 | if (HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_EMPTY) { | |
1967 | ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL); | |
1968 | return; | |
1969 | } | |
1970 | ||
1971 | ASSERT(L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr))); | |
1972 | ||
1973 | arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata, | |
1974 | hdr->b_size, zio_data_buf_free); | |
1975 | ||
1976 | ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write); | |
1977 | hdr->b_l1hdr.b_tmp_cdata = NULL; | |
1978 | } | |
1979 | ||
bd089c54 MA |
1980 | /* |
1981 | * Free up buf->b_data and if 'remove' is set, then pull the | |
1982 | * arc_buf_t off of the arc_buf_hdr_t's list and free it. | |
1983 | */ | |
34dc7c2f | 1984 | static void |
ca0bf58d | 1985 | arc_buf_destroy(arc_buf_t *buf, boolean_t remove) |
34dc7c2f BB |
1986 | { |
1987 | arc_buf_t **bufp; | |
1988 | ||
1989 | /* free up data associated with the buf */ | |
b9541d6b CW |
1990 | if (buf->b_data != NULL) { |
1991 | arc_state_t *state = buf->b_hdr->b_l1hdr.b_state; | |
34dc7c2f | 1992 | uint64_t size = buf->b_hdr->b_size; |
b9541d6b | 1993 | arc_buf_contents_t type = arc_buf_type(buf->b_hdr); |
34dc7c2f BB |
1994 | |
1995 | arc_cksum_verify(buf); | |
498877ba | 1996 | arc_buf_unwatch(buf); |
428870ff | 1997 | |
ca0bf58d PS |
1998 | if (type == ARC_BUFC_METADATA) { |
1999 | arc_buf_data_free(buf, zio_buf_free); | |
2000 | arc_space_return(size, ARC_SPACE_META); | |
2001 | } else { | |
2002 | ASSERT(type == ARC_BUFC_DATA); | |
2003 | arc_buf_data_free(buf, zio_data_buf_free); | |
2004 | arc_space_return(size, ARC_SPACE_DATA); | |
34dc7c2f | 2005 | } |
ca0bf58d PS |
2006 | |
2007 | /* protected by hash lock, if in the hash table */ | |
2008 | if (multilist_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) { | |
34dc7c2f BB |
2009 | uint64_t *cnt = &state->arcs_lsize[type]; |
2010 | ||
b9541d6b CW |
2011 | ASSERT(refcount_is_zero( |
2012 | &buf->b_hdr->b_l1hdr.b_refcnt)); | |
2013 | ASSERT(state != arc_anon && state != arc_l2c_only); | |
34dc7c2f BB |
2014 | |
2015 | ASSERT3U(*cnt, >=, size); | |
2016 | atomic_add_64(cnt, -size); | |
2017 | } | |
36da08ef PS |
2018 | |
2019 | (void) refcount_remove_many(&state->arcs_size, size, buf); | |
34dc7c2f | 2020 | buf->b_data = NULL; |
1eb5bfa3 GW |
2021 | |
2022 | /* | |
2023 | * If we're destroying a duplicate buffer make sure | |
2024 | * that the appropriate statistics are updated. | |
2025 | */ | |
b9541d6b CW |
2026 | if (buf->b_hdr->b_l1hdr.b_datacnt > 1 && |
2027 | HDR_ISTYPE_DATA(buf->b_hdr)) { | |
1eb5bfa3 GW |
2028 | ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); |
2029 | ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size); | |
2030 | } | |
b9541d6b CW |
2031 | ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0); |
2032 | buf->b_hdr->b_l1hdr.b_datacnt -= 1; | |
34dc7c2f BB |
2033 | } |
2034 | ||
2035 | /* only remove the buf if requested */ | |
bd089c54 | 2036 | if (!remove) |
34dc7c2f BB |
2037 | return; |
2038 | ||
2039 | /* remove the buf from the hdr list */ | |
b9541d6b CW |
2040 | for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf; |
2041 | bufp = &(*bufp)->b_next) | |
34dc7c2f BB |
2042 | continue; |
2043 | *bufp = buf->b_next; | |
428870ff | 2044 | buf->b_next = NULL; |
34dc7c2f BB |
2045 | |
2046 | ASSERT(buf->b_efunc == NULL); | |
2047 | ||
2048 | /* clean up the buf */ | |
2049 | buf->b_hdr = NULL; | |
2050 | kmem_cache_free(buf_cache, buf); | |
2051 | } | |
2052 | ||
d962d5da PS |
2053 | static void |
2054 | arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) | |
2055 | { | |
2056 | l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; | |
2057 | l2arc_dev_t *dev = l2hdr->b_dev; | |
2058 | ||
2059 | ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); | |
2060 | ASSERT(HDR_HAS_L2HDR(hdr)); | |
2061 | ||
2062 | list_remove(&dev->l2ad_buflist, hdr); | |
2063 | ||
d962d5da PS |
2064 | /* |
2065 | * We don't want to leak the b_tmp_cdata buffer that was | |
2066 | * allocated in l2arc_write_buffers() | |
2067 | */ | |
2068 | arc_buf_l2_cdata_free(hdr); | |
2069 | ||
2070 | /* | |
2071 | * If the l2hdr's b_daddr is equal to L2ARC_ADDR_UNSET, then | |
2072 | * this header is being processed by l2arc_write_buffers() (i.e. | |
2073 | * it's in the first stage of l2arc_write_buffers()). | |
2074 | * Re-affirming that truth here, just to serve as a reminder. If | |
2075 | * b_daddr does not equal L2ARC_ADDR_UNSET, then the header may or | |
2076 | * may not have its HDR_L2_WRITING flag set (the write may have | |
2077 | * completed, in which case HDR_L2_WRITING will be false and the | |
2078 | * b_daddr field will point to the address of the buffer on disk). | |
2079 | */ | |
2080 | IMPLY(l2hdr->b_daddr == L2ARC_ADDR_UNSET, HDR_L2_WRITING(hdr)); | |
2081 | ||
2082 | /* | |
2083 | * If b_daddr is equal to L2ARC_ADDR_UNSET, we're racing with | |
2084 | * l2arc_write_buffers(). Since we've just removed this header | |
2085 | * from the l2arc buffer list, this header will never reach the | |
2086 | * second stage of l2arc_write_buffers(), which increments the | |
2087 | * accounting stats for this header. Thus, we must be careful | |
2088 | * not to decrement them for this header either. | |
2089 | */ | |
2090 | if (l2hdr->b_daddr != L2ARC_ADDR_UNSET) { | |
2091 | ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); | |
2092 | ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); | |
2093 | ||
2094 | vdev_space_update(dev->l2ad_vdev, | |
2095 | -l2hdr->b_asize, 0, 0); | |
2096 | ||
2097 | (void) refcount_remove_many(&dev->l2ad_alloc, | |
2098 | l2hdr->b_asize, hdr); | |
2099 | } | |
2100 | ||
2101 | hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR; | |
2102 | } | |
2103 | ||
34dc7c2f BB |
2104 | static void |
2105 | arc_hdr_destroy(arc_buf_hdr_t *hdr) | |
2106 | { | |
b9541d6b CW |
2107 | if (HDR_HAS_L1HDR(hdr)) { |
2108 | ASSERT(hdr->b_l1hdr.b_buf == NULL || | |
2109 | hdr->b_l1hdr.b_datacnt > 0); | |
2110 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
2111 | ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); | |
2112 | } | |
34dc7c2f | 2113 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); |
b9541d6b CW |
2114 | ASSERT(!HDR_IN_HASH_TABLE(hdr)); |
2115 | ||
2116 | if (HDR_HAS_L2HDR(hdr)) { | |
d962d5da PS |
2117 | l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; |
2118 | boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); | |
428870ff | 2119 | |
d962d5da PS |
2120 | if (!buflist_held) |
2121 | mutex_enter(&dev->l2ad_mtx); | |
b9541d6b | 2122 | |
ca0bf58d | 2123 | /* |
d962d5da PS |
2124 | * Even though we checked this conditional above, we |
2125 | * need to check this again now that we have the | |
2126 | * l2ad_mtx. This is because we could be racing with | |
2127 | * another thread calling l2arc_evict() which might have | |
2128 | * destroyed this header's L2 portion as we were waiting | |
2129 | * to acquire the l2ad_mtx. If that happens, we don't | |
2130 | * want to re-destroy the header's L2 portion. | |
ca0bf58d | 2131 | */ |
d962d5da PS |
2132 | if (HDR_HAS_L2HDR(hdr)) |
2133 | arc_hdr_l2hdr_destroy(hdr); | |
428870ff BB |
2134 | |
2135 | if (!buflist_held) | |
d962d5da | 2136 | mutex_exit(&dev->l2ad_mtx); |
34dc7c2f BB |
2137 | } |
2138 | ||
b9541d6b | 2139 | if (!BUF_EMPTY(hdr)) |
428870ff | 2140 | buf_discard_identity(hdr); |
b9541d6b | 2141 | |
34dc7c2f BB |
2142 | if (hdr->b_freeze_cksum != NULL) { |
2143 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
2144 | hdr->b_freeze_cksum = NULL; | |
2145 | } | |
2146 | ||
b9541d6b CW |
2147 | if (HDR_HAS_L1HDR(hdr)) { |
2148 | while (hdr->b_l1hdr.b_buf) { | |
2149 | arc_buf_t *buf = hdr->b_l1hdr.b_buf; | |
2150 | ||
2151 | if (buf->b_efunc != NULL) { | |
ca0bf58d | 2152 | mutex_enter(&arc_user_evicts_lock); |
b9541d6b CW |
2153 | mutex_enter(&buf->b_evict_lock); |
2154 | ASSERT(buf->b_hdr != NULL); | |
ca0bf58d | 2155 | arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE); |
b9541d6b CW |
2156 | hdr->b_l1hdr.b_buf = buf->b_next; |
2157 | buf->b_hdr = &arc_eviction_hdr; | |
2158 | buf->b_next = arc_eviction_list; | |
2159 | arc_eviction_list = buf; | |
2160 | mutex_exit(&buf->b_evict_lock); | |
ca0bf58d PS |
2161 | cv_signal(&arc_user_evicts_cv); |
2162 | mutex_exit(&arc_user_evicts_lock); | |
b9541d6b | 2163 | } else { |
ca0bf58d | 2164 | arc_buf_destroy(hdr->b_l1hdr.b_buf, TRUE); |
b9541d6b CW |
2165 | } |
2166 | } | |
2167 | } | |
2168 | ||
34dc7c2f | 2169 | ASSERT3P(hdr->b_hash_next, ==, NULL); |
b9541d6b | 2170 | if (HDR_HAS_L1HDR(hdr)) { |
ca0bf58d | 2171 | ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); |
b9541d6b CW |
2172 | ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); |
2173 | kmem_cache_free(hdr_full_cache, hdr); | |
2174 | } else { | |
2175 | kmem_cache_free(hdr_l2only_cache, hdr); | |
2176 | } | |
34dc7c2f BB |
2177 | } |
2178 | ||
2179 | void | |
2180 | arc_buf_free(arc_buf_t *buf, void *tag) | |
2181 | { | |
2182 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
b9541d6b | 2183 | int hashed = hdr->b_l1hdr.b_state != arc_anon; |
34dc7c2f BB |
2184 | |
2185 | ASSERT(buf->b_efunc == NULL); | |
2186 | ASSERT(buf->b_data != NULL); | |
2187 | ||
2188 | if (hashed) { | |
2189 | kmutex_t *hash_lock = HDR_LOCK(hdr); | |
2190 | ||
2191 | mutex_enter(hash_lock); | |
428870ff BB |
2192 | hdr = buf->b_hdr; |
2193 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); | |
2194 | ||
34dc7c2f | 2195 | (void) remove_reference(hdr, hash_lock, tag); |
b9541d6b | 2196 | if (hdr->b_l1hdr.b_datacnt > 1) { |
ca0bf58d | 2197 | arc_buf_destroy(buf, TRUE); |
428870ff | 2198 | } else { |
b9541d6b | 2199 | ASSERT(buf == hdr->b_l1hdr.b_buf); |
428870ff | 2200 | ASSERT(buf->b_efunc == NULL); |
2a432414 | 2201 | hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; |
428870ff | 2202 | } |
34dc7c2f BB |
2203 | mutex_exit(hash_lock); |
2204 | } else if (HDR_IO_IN_PROGRESS(hdr)) { | |
2205 | int destroy_hdr; | |
2206 | /* | |
2207 | * We are in the middle of an async write. Don't destroy | |
2208 | * this buffer unless the write completes before we finish | |
2209 | * decrementing the reference count. | |
2210 | */ | |
ca0bf58d | 2211 | mutex_enter(&arc_user_evicts_lock); |
34dc7c2f | 2212 | (void) remove_reference(hdr, NULL, tag); |
b9541d6b | 2213 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
34dc7c2f | 2214 | destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); |
ca0bf58d | 2215 | mutex_exit(&arc_user_evicts_lock); |
34dc7c2f BB |
2216 | if (destroy_hdr) |
2217 | arc_hdr_destroy(hdr); | |
2218 | } else { | |
428870ff | 2219 | if (remove_reference(hdr, NULL, tag) > 0) |
ca0bf58d | 2220 | arc_buf_destroy(buf, TRUE); |
428870ff | 2221 | else |
34dc7c2f | 2222 | arc_hdr_destroy(hdr); |
34dc7c2f BB |
2223 | } |
2224 | } | |
2225 | ||
13fe0198 | 2226 | boolean_t |
34dc7c2f BB |
2227 | arc_buf_remove_ref(arc_buf_t *buf, void* tag) |
2228 | { | |
2229 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
96c080cb | 2230 | kmutex_t *hash_lock = HDR_LOCK(hdr); |
13fe0198 | 2231 | boolean_t no_callback = (buf->b_efunc == NULL); |
34dc7c2f | 2232 | |
b9541d6b CW |
2233 | if (hdr->b_l1hdr.b_state == arc_anon) { |
2234 | ASSERT(hdr->b_l1hdr.b_datacnt == 1); | |
34dc7c2f BB |
2235 | arc_buf_free(buf, tag); |
2236 | return (no_callback); | |
2237 | } | |
2238 | ||
2239 | mutex_enter(hash_lock); | |
428870ff | 2240 | hdr = buf->b_hdr; |
b9541d6b | 2241 | ASSERT(hdr->b_l1hdr.b_datacnt > 0); |
428870ff | 2242 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); |
b9541d6b | 2243 | ASSERT(hdr->b_l1hdr.b_state != arc_anon); |
34dc7c2f BB |
2244 | ASSERT(buf->b_data != NULL); |
2245 | ||
2246 | (void) remove_reference(hdr, hash_lock, tag); | |
b9541d6b | 2247 | if (hdr->b_l1hdr.b_datacnt > 1) { |
34dc7c2f | 2248 | if (no_callback) |
ca0bf58d | 2249 | arc_buf_destroy(buf, TRUE); |
34dc7c2f | 2250 | } else if (no_callback) { |
b9541d6b | 2251 | ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL); |
428870ff | 2252 | ASSERT(buf->b_efunc == NULL); |
2a432414 | 2253 | hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; |
34dc7c2f | 2254 | } |
b9541d6b CW |
2255 | ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 || |
2256 | refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
34dc7c2f BB |
2257 | mutex_exit(hash_lock); |
2258 | return (no_callback); | |
2259 | } | |
2260 | ||
5f6d0b6f | 2261 | uint64_t |
34dc7c2f BB |
2262 | arc_buf_size(arc_buf_t *buf) |
2263 | { | |
2264 | return (buf->b_hdr->b_size); | |
2265 | } | |
2266 | ||
1eb5bfa3 GW |
2267 | /* |
2268 | * Called from the DMU to determine if the current buffer should be | |
2269 | * evicted. In order to ensure proper locking, the eviction must be initiated | |
2270 | * from the DMU. Return true if the buffer is associated with user data and | |
2271 | * duplicate buffers still exist. | |
2272 | */ | |
2273 | boolean_t | |
2274 | arc_buf_eviction_needed(arc_buf_t *buf) | |
2275 | { | |
2276 | arc_buf_hdr_t *hdr; | |
2277 | boolean_t evict_needed = B_FALSE; | |
2278 | ||
2279 | if (zfs_disable_dup_eviction) | |
2280 | return (B_FALSE); | |
2281 | ||
2282 | mutex_enter(&buf->b_evict_lock); | |
2283 | hdr = buf->b_hdr; | |
2284 | if (hdr == NULL) { | |
2285 | /* | |
2286 | * We are in arc_do_user_evicts(); let that function | |
2287 | * perform the eviction. | |
2288 | */ | |
2289 | ASSERT(buf->b_data == NULL); | |
2290 | mutex_exit(&buf->b_evict_lock); | |
2291 | return (B_FALSE); | |
2292 | } else if (buf->b_data == NULL) { | |
2293 | /* | |
2294 | * We have already been added to the arc eviction list; | |
2295 | * recommend eviction. | |
2296 | */ | |
2297 | ASSERT3P(hdr, ==, &arc_eviction_hdr); | |
2298 | mutex_exit(&buf->b_evict_lock); | |
2299 | return (B_TRUE); | |
2300 | } | |
2301 | ||
b9541d6b | 2302 | if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr)) |
1eb5bfa3 GW |
2303 | evict_needed = B_TRUE; |
2304 | ||
2305 | mutex_exit(&buf->b_evict_lock); | |
2306 | return (evict_needed); | |
2307 | } | |
2308 | ||
34dc7c2f | 2309 | /* |
ca0bf58d PS |
2310 | * Evict the arc_buf_hdr that is provided as a parameter. The resultant |
2311 | * state of the header is dependent on its state prior to entering this | |
2312 | * function. The following transitions are possible: | |
34dc7c2f | 2313 | * |
ca0bf58d PS |
2314 | * - arc_mru -> arc_mru_ghost |
2315 | * - arc_mfu -> arc_mfu_ghost | |
2316 | * - arc_mru_ghost -> arc_l2c_only | |
2317 | * - arc_mru_ghost -> deleted | |
2318 | * - arc_mfu_ghost -> arc_l2c_only | |
2319 | * - arc_mfu_ghost -> deleted | |
34dc7c2f | 2320 | */ |
ca0bf58d PS |
2321 | static int64_t |
2322 | arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) | |
34dc7c2f | 2323 | { |
ca0bf58d PS |
2324 | arc_state_t *evicted_state, *state; |
2325 | int64_t bytes_evicted = 0; | |
34dc7c2f | 2326 | |
ca0bf58d PS |
2327 | ASSERT(MUTEX_HELD(hash_lock)); |
2328 | ASSERT(HDR_HAS_L1HDR(hdr)); | |
e8b96c60 | 2329 | |
ca0bf58d PS |
2330 | state = hdr->b_l1hdr.b_state; |
2331 | if (GHOST_STATE(state)) { | |
2332 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2333 | ASSERT(hdr->b_l1hdr.b_buf == NULL); | |
e8b96c60 MA |
2334 | |
2335 | /* | |
ca0bf58d PS |
2336 | * l2arc_write_buffers() relies on a header's L1 portion |
2337 | * (i.e. its b_tmp_cdata field) during its write phase. | |
2338 | * Thus, we cannot push a header onto the arc_l2c_only | |
2339 | * state (removing its L1 piece) until the header is | |
2340 | * done being written to the l2arc. | |
e8b96c60 | 2341 | */ |
ca0bf58d PS |
2342 | if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) { |
2343 | ARCSTAT_BUMP(arcstat_evict_l2_skip); | |
2344 | return (bytes_evicted); | |
e8b96c60 MA |
2345 | } |
2346 | ||
ca0bf58d PS |
2347 | ARCSTAT_BUMP(arcstat_deleted); |
2348 | bytes_evicted += hdr->b_size; | |
428870ff | 2349 | |
ca0bf58d | 2350 | DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); |
428870ff | 2351 | |
ca0bf58d PS |
2352 | if (HDR_HAS_L2HDR(hdr)) { |
2353 | /* | |
2354 | * This buffer is cached on the 2nd Level ARC; | |
2355 | * don't destroy the header. | |
2356 | */ | |
2357 | arc_change_state(arc_l2c_only, hdr, hash_lock); | |
2358 | /* | |
2359 | * dropping from L1+L2 cached to L2-only, | |
2360 | * realloc to remove the L1 header. | |
2361 | */ | |
2362 | hdr = arc_hdr_realloc(hdr, hdr_full_cache, | |
2363 | hdr_l2only_cache); | |
34dc7c2f | 2364 | } else { |
ca0bf58d PS |
2365 | arc_change_state(arc_anon, hdr, hash_lock); |
2366 | arc_hdr_destroy(hdr); | |
34dc7c2f | 2367 | } |
ca0bf58d | 2368 | return (bytes_evicted); |
34dc7c2f BB |
2369 | } |
2370 | ||
ca0bf58d PS |
2371 | ASSERT(state == arc_mru || state == arc_mfu); |
2372 | evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
34dc7c2f | 2373 | |
ca0bf58d PS |
2374 | /* prefetch buffers have a minimum lifespan */ |
2375 | if (HDR_IO_IN_PROGRESS(hdr) || | |
2376 | ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && | |
2377 | ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < | |
2378 | arc_min_prefetch_lifespan)) { | |
2379 | ARCSTAT_BUMP(arcstat_evict_skip); | |
2380 | return (bytes_evicted); | |
da8ccd0e PS |
2381 | } |
2382 | ||
ca0bf58d PS |
2383 | ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); |
2384 | ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0); | |
2385 | while (hdr->b_l1hdr.b_buf) { | |
2386 | arc_buf_t *buf = hdr->b_l1hdr.b_buf; | |
2387 | if (!mutex_tryenter(&buf->b_evict_lock)) { | |
2388 | ARCSTAT_BUMP(arcstat_mutex_miss); | |
2389 | break; | |
2390 | } | |
2391 | if (buf->b_data != NULL) | |
2392 | bytes_evicted += hdr->b_size; | |
2393 | if (buf->b_efunc != NULL) { | |
2394 | mutex_enter(&arc_user_evicts_lock); | |
2395 | arc_buf_destroy(buf, FALSE); | |
2396 | hdr->b_l1hdr.b_buf = buf->b_next; | |
2397 | buf->b_hdr = &arc_eviction_hdr; | |
2398 | buf->b_next = arc_eviction_list; | |
2399 | arc_eviction_list = buf; | |
2400 | cv_signal(&arc_user_evicts_cv); | |
2401 | mutex_exit(&arc_user_evicts_lock); | |
2402 | mutex_exit(&buf->b_evict_lock); | |
2403 | } else { | |
2404 | mutex_exit(&buf->b_evict_lock); | |
2405 | arc_buf_destroy(buf, TRUE); | |
2406 | } | |
2407 | } | |
34dc7c2f | 2408 | |
ca0bf58d PS |
2409 | if (HDR_HAS_L2HDR(hdr)) { |
2410 | ARCSTAT_INCR(arcstat_evict_l2_cached, hdr->b_size); | |
2411 | } else { | |
2412 | if (l2arc_write_eligible(hdr->b_spa, hdr)) | |
2413 | ARCSTAT_INCR(arcstat_evict_l2_eligible, hdr->b_size); | |
2414 | else | |
2415 | ARCSTAT_INCR(arcstat_evict_l2_ineligible, hdr->b_size); | |
2416 | } | |
34dc7c2f | 2417 | |
ca0bf58d PS |
2418 | if (hdr->b_l1hdr.b_datacnt == 0) { |
2419 | arc_change_state(evicted_state, hdr, hash_lock); | |
2420 | ASSERT(HDR_IN_HASH_TABLE(hdr)); | |
2421 | hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE; | |
2422 | hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; | |
2423 | DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); | |
2424 | } | |
34dc7c2f | 2425 | |
ca0bf58d | 2426 | return (bytes_evicted); |
34dc7c2f BB |
2427 | } |
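
/*
 * A minimal sketch (illustrative) of the calling convention: the hash
 * lock must be held, and a return of 0 means the header was skipped
 * (still referenced, in I/O, or within the prefetch lifespan), which is
 * why arc_evict_state_impl() below only counts non-zero returns toward
 * its batch limit.
 */
#if 0	/* illustrative only */
static int64_t
arc_try_evict_example(arc_buf_hdr_t *hdr)
{
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int64_t evicted = 0;

	if (mutex_tryenter(hash_lock)) {
		evicted = arc_evict_hdr(hdr, hash_lock);
		mutex_exit(hash_lock);
	}
	return (evicted);
}
#endif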
2428 | ||
ca0bf58d PS |
2429 | static uint64_t |
2430 | arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, | |
2431 | uint64_t spa, int64_t bytes) | |
34dc7c2f | 2432 | { |
ca0bf58d PS |
2433 | multilist_sublist_t *mls; |
2434 | uint64_t bytes_evicted = 0; | |
2435 | arc_buf_hdr_t *hdr; | |
34dc7c2f | 2436 | kmutex_t *hash_lock; |
ca0bf58d | 2437 | int evict_count = 0; |
34dc7c2f | 2438 | |
ca0bf58d | 2439 | ASSERT3P(marker, !=, NULL); |
96c080cb | 2440 | IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); |
ca0bf58d PS |
2441 | |
2442 | mls = multilist_sublist_lock(ml, idx); | |
572e2857 | 2443 | |
ca0bf58d PS |
2444 | for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; |
2445 | hdr = multilist_sublist_prev(mls, marker)) { | |
2446 | if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || | |
2447 | (evict_count >= zfs_arc_evict_batch_limit)) | |
2448 | break; | |
2449 | ||
2450 | /* | |
2451 | * To keep our iteration location, move the marker | |
2452 | * forward. Since we're not holding hdr's hash lock, we | |
2453 | * must be very careful and not remove 'hdr' from the | |
2454 | * sublist. Otherwise, other consumers might mistake the | |
2455 | * 'hdr' as not being on a sublist when they call the | |
2456 | * multilist_link_active() function (they all rely on | |
2457 | * the hash lock protecting concurrent insertions and | |
2458 | * removals). multilist_sublist_move_forward() was | |
2459 | * specifically implemented to ensure this is the case | |
2460 | * (only 'marker' will be removed and re-inserted). | |
2461 | */ | |
2462 | multilist_sublist_move_forward(mls, marker); | |
2463 | ||
2464 | /* | |
2465 | * The only case where the b_spa field should ever be | |
2466 | * zero, is the marker headers inserted by | |
2467 | * arc_evict_state(). It's possible for multiple threads | |
2468 | * to be calling arc_evict_state() concurrently (e.g. | |
2469 | * dsl_pool_close() and zio_inject_fault()), so we must | |
2470 | * skip any markers we see from these other threads. | |
2471 | */ | |
2a432414 | 2472 | if (hdr->b_spa == 0) |
572e2857 BB |
2473 | continue; |
2474 | ||
ca0bf58d PS |
2475 | /* we're only interested in evicting buffers of a certain spa */ |
2476 | if (spa != 0 && hdr->b_spa != spa) { | |
2477 | ARCSTAT_BUMP(arcstat_evict_skip); | |
428870ff | 2478 | continue; |
ca0bf58d PS |
2479 | } |
2480 | ||
2481 | hash_lock = HDR_LOCK(hdr); | |
e8b96c60 MA |
2482 | |
2483 | /* | |
ca0bf58d PS |
2484 | * We aren't calling this function from any code path |
2485 | * that would already be holding a hash lock, so we're | |
2486 | * asserting on this assumption to be defensive in case | |
2487 | * this ever changes. Without this check, it would be | |
2488 | * possible to incorrectly increment arcstat_mutex_miss | |
2489 | * below (e.g. if the code changed such that we called | |
2490 | * this function with a hash lock held). | |
e8b96c60 | 2491 | */ |
ca0bf58d PS |
2492 | ASSERT(!MUTEX_HELD(hash_lock)); |
2493 | ||
34dc7c2f | 2494 | if (mutex_tryenter(hash_lock)) { |
ca0bf58d PS |
2495 | uint64_t evicted = arc_evict_hdr(hdr, hash_lock); |
2496 | mutex_exit(hash_lock); | |
34dc7c2f | 2497 | |
ca0bf58d | 2498 | bytes_evicted += evicted; |
34dc7c2f | 2499 | |
572e2857 | 2500 | /* |
ca0bf58d PS |
2501 | * If evicted is zero, arc_evict_hdr() must have |
2502 | * decided to skip this header, don't increment | |
2503 | * evict_count in this case. | |
572e2857 | 2504 | */ |
ca0bf58d PS |
2505 | if (evicted != 0) |
2506 | evict_count++; | |
2507 | ||
2508 | /* | |
2509 | * If arc_size isn't overflowing, signal any | |
2510 | * threads that might happen to be waiting. | |
2511 | * | |
2512 | * For each header evicted, we wake up a single | |
2513 | * thread. If we used cv_broadcast, we could | |
2514 | * wake up "too many" threads causing arc_size | |
2515 | * to significantly overflow arc_c; since | |
2516 | * arc_get_data_buf() doesn't check for overflow | |
2517 | * when it's woken up (it doesn't because it's | |
2518 | * possible for the ARC to be overflowing while | |
2519 | * full of un-evictable buffers, and the | |
2520 | * function should proceed in this case). | |
2521 | * | |
2522 | * If threads are left sleeping, due to not | |
2523 | * using cv_broadcast, they will be woken up | |
2524 | * just before arc_reclaim_thread() sleeps. | |
2525 | */ | |
2526 | mutex_enter(&arc_reclaim_lock); | |
2527 | if (!arc_is_overflowing()) | |
2528 | cv_signal(&arc_reclaim_waiters_cv); | |
2529 | mutex_exit(&arc_reclaim_lock); | |
e8b96c60 | 2530 | } else { |
ca0bf58d | 2531 | ARCSTAT_BUMP(arcstat_mutex_miss); |
e8b96c60 | 2532 | } |
34dc7c2f | 2533 | } |
34dc7c2f | 2534 | |
ca0bf58d | 2535 | multilist_sublist_unlock(mls); |
34dc7c2f | 2536 | |
ca0bf58d | 2537 | return (bytes_evicted); |
34dc7c2f BB |
2538 | } |
2539 | ||
ca0bf58d PS |
2540 | /* |
2541 | * Evict buffers from the given arc state, until we've removed the | |
2542 | * specified number of bytes. Move the removed buffers to the | |
2543 | * appropriate evict state. | |
2544 | * | |
2545 | * This function makes a "best effort". It skips over any buffers | |
2546 | * it can't get a hash_lock on, and so, may not catch all candidates. | |
2547 | * It may also return without evicting as much space as requested. | |
2548 | * | |
2549 | * If bytes is specified using the special value ARC_EVICT_ALL, this | |
2550 | * will evict all available (i.e. unlocked and evictable) buffers from | |
2551 | * the given arc state; which is used by arc_flush(). | |
2552 | */ | |
2553 | static uint64_t | |
2554 | arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, | |
2555 | arc_buf_contents_t type) | |
34dc7c2f | 2556 | { |
ca0bf58d PS |
2557 | uint64_t total_evicted = 0; |
2558 | multilist_t *ml = &state->arcs_list[type]; | |
2559 | int num_sublists; | |
2560 | arc_buf_hdr_t **markers; | |
2561 | int i; | |
2562 | ||
96c080cb | 2563 | IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); |
ca0bf58d PS |
2564 | |
2565 | num_sublists = multilist_get_num_sublists(ml); | |
d164b209 BB |
2566 | |
2567 | /* | |
ca0bf58d PS |
2568 | * If we've tried to evict from each sublist, made some |
2569 | * progress, but still have not hit the target number of bytes | |
2570 | * to evict, we want to keep trying. The markers allow us to | |
2571 | * pick up where we left off for each individual sublist, rather | |
2572 | * than starting from the tail each time. | |
d164b209 | 2573 | */ |
ca0bf58d PS |
2574 | markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); |
2575 | for (i = 0; i < num_sublists; i++) { | |
2576 | multilist_sublist_t *mls; | |
34dc7c2f | 2577 | |
ca0bf58d PS |
2578 | markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); |
2579 | ||
2580 | /* | |
2581 | * A b_spa of 0 is used to indicate that this header is | |
2582 | * a marker. This fact is used in arc_adjust_type() and | |
2583 | * arc_evict_state_impl(). | |
2584 | */ | |
2585 | markers[i]->b_spa = 0; | |
34dc7c2f | 2586 | |
ca0bf58d PS |
2587 | mls = multilist_sublist_lock(ml, i); |
2588 | multilist_sublist_insert_tail(mls, markers[i]); | |
2589 | multilist_sublist_unlock(mls); | |
34dc7c2f BB |
2590 | } |
2591 | ||
d164b209 | 2592 | /* |
ca0bf58d PS |
2593 | * While we haven't hit our target number of bytes to evict, or |
2594 | * we're evicting all available buffers. | |
d164b209 | 2595 | */ |
ca0bf58d PS |
2596 | while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { |
2597 | /* | |
2598 | * Start eviction using a randomly selected sublist, | |
2599 | * this is to try and evenly balance eviction across all | |
2600 | * sublists. Always starting at the same sublist | |
2601 | * (e.g. index 0) would cause evictions to favor certain | |
2602 | * sublists over others. | |
2603 | */ | |
2604 | int sublist_idx = multilist_get_random_index(ml); | |
2605 | uint64_t scan_evicted = 0; | |
34dc7c2f | 2606 | |
ca0bf58d PS |
2607 | for (i = 0; i < num_sublists; i++) { |
2608 | uint64_t bytes_remaining; | |
2609 | uint64_t bytes_evicted; | |
d164b209 | 2610 | |
ca0bf58d PS |
2611 | if (bytes == ARC_EVICT_ALL) |
2612 | bytes_remaining = ARC_EVICT_ALL; | |
2613 | else if (total_evicted < bytes) | |
2614 | bytes_remaining = bytes - total_evicted; | |
2615 | else | |
2616 | break; | |
34dc7c2f | 2617 | |
ca0bf58d PS |
2618 | bytes_evicted = arc_evict_state_impl(ml, sublist_idx, |
2619 | markers[sublist_idx], spa, bytes_remaining); | |
2620 | ||
2621 | scan_evicted += bytes_evicted; | |
2622 | total_evicted += bytes_evicted; | |
2623 | ||
2624 | /* we've reached the end, wrap to the beginning */ | |
2625 | if (++sublist_idx >= num_sublists) | |
2626 | sublist_idx = 0; | |
2627 | } | |
2628 | ||
2629 | /* | |
2630 | * If we didn't evict anything during this scan, we have | |
2631 | * no reason to believe we'll evict more during another | |
2632 | * scan, so break the loop. | |
2633 | */ | |
2634 | if (scan_evicted == 0) { | |
2635 | /* This isn't possible, let's make that obvious */ | |
2636 | ASSERT3S(bytes, !=, 0); | |
34dc7c2f | 2637 | |
ca0bf58d PS |
2638 | /* |
2639 | * When bytes is ARC_EVICT_ALL, the only way to | |
2640 | * break the loop is when scan_evicted is zero. | |
2641 | * In that case, we actually have evicted enough, | |
2642 | * so we don't want to increment the kstat. | |
2643 | */ | |
2644 | if (bytes != ARC_EVICT_ALL) { | |
2645 | ASSERT3S(total_evicted, <, bytes); | |
2646 | ARCSTAT_BUMP(arcstat_evict_not_enough); | |
2647 | } | |
d164b209 | 2648 | |
ca0bf58d PS |
2649 | break; |
2650 | } | |
d164b209 | 2651 | } |
34dc7c2f | 2652 | |
ca0bf58d PS |
2653 | for (i = 0; i < num_sublists; i++) { |
2654 | multilist_sublist_t *mls = multilist_sublist_lock(ml, i); | |
2655 | multilist_sublist_remove(mls, markers[i]); | |
2656 | multilist_sublist_unlock(mls); | |
34dc7c2f | 2657 | |
ca0bf58d | 2658 | kmem_cache_free(hdr_full_cache, markers[i]); |
34dc7c2f | 2659 | } |
ca0bf58d PS |
2660 | kmem_free(markers, sizeof (*markers) * num_sublists); |
2661 | ||
2662 | return (total_evicted); | |
2663 | } | |
2664 | ||
2665 | /* | |
2666 | * Flush all "evictable" data of the given type from the arc state | |
2667 | * specified. This will not evict any "active" buffers (i.e. referenced). | |
2668 | * | |
2669 | * When 'retry' is set to FALSE, the function will make a single pass | |
2670 | * over the state and evict any buffers that it can. Since it doesn't | |
2671 | * continually retry the eviction, it might end up leaving some buffers | |
2672 | * in the ARC due to lock misses. | |
2673 | * | |
2674 | * When 'retry' is set to TRUE, the function will continually retry the | |
2675 | * eviction until *all* evictable buffers have been removed from the | |
2676 | * state. As a result, if concurrent insertions into the state are | |
2677 | * allowed (e.g. if the ARC isn't shutting down), this function might | |
2678 | * wind up in an infinite loop, continually trying to evict buffers. | |
2679 | */ | |
2680 | static uint64_t | |
2681 | arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, | |
2682 | boolean_t retry) | |
2683 | { | |
2684 | uint64_t evicted = 0; | |
2685 | ||
2686 | while (state->arcs_lsize[type] != 0) { | |
2687 | evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); | |
2688 | ||
2689 | if (!retry) | |
2690 | break; | |
2691 | } | |
2692 | ||
2693 | return (evicted); | |
34dc7c2f BB |
2694 | } |
2695 | ||
ab26409d | 2696 | /* |
f6046738 BB |
2697 | * Helper function for arc_prune(); it is responsible for safely handling
2698 | * the execution of a registered arc_prune_func_t. | |
ab26409d BB |
2699 | */ |
2700 | static void | |
f6046738 | 2701 | arc_prune_task(void *ptr) |
ab26409d | 2702 | { |
f6046738 BB |
2703 | arc_prune_t *ap = (arc_prune_t *)ptr; |
2704 | arc_prune_func_t *func = ap->p_pfunc; | |
ab26409d | 2705 | |
f6046738 BB |
2706 | if (func != NULL) |
2707 | func(ap->p_adjust, ap->p_private); | |
ab26409d | 2708 | |
f6046738 BB |
2709 | /* Callback unregistered concurrently with execution */ |
2710 | if (refcount_remove(&ap->p_refcnt, func) == 0) { | |
2711 | ASSERT(!list_link_active(&ap->p_node)); | |
2712 | refcount_destroy(&ap->p_refcnt); | |
2713 | kmem_free(ap, sizeof (*ap)); | |
2714 | } | |
2715 | } | |
ab26409d | 2716 | |
f6046738 BB |
2717 | /* |
2718 | * Notify registered consumers they must drop holds on a portion of the ARC | |
2719 | * buffers they reference. This provides a mechanism to ensure the ARC can | |
2720 | * honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This | |
2721 | * is analogous to dnlc_reduce_cache() but more generic. | |
2722 | * | |
2723 | * This operation is performed asynchronously so it may be safely called | |
ca67b33a | 2724 | * in the context of the arc_reclaim_thread(). A reference is taken here |
f6046738 BB |
2725 | * for each registered arc_prune_t and the arc_prune_task() is responsible |
2726 | * for releasing it once the registered arc_prune_func_t has completed. | |
2727 | */ | |
2728 | static void | |
2729 | arc_prune_async(int64_t adjust) | |
2730 | { | |
2731 | arc_prune_t *ap; | |
ab26409d | 2732 | |
f6046738 BB |
2733 | mutex_enter(&arc_prune_mtx); |
2734 | for (ap = list_head(&arc_prune_list); ap != NULL; | |
2735 | ap = list_next(&arc_prune_list, ap)) { | |
ab26409d | 2736 | |
f6046738 BB |
2737 | if (refcount_count(&ap->p_refcnt) >= 2) |
2738 | continue; | |
ab26409d | 2739 | |
f6046738 BB |
2740 | refcount_add(&ap->p_refcnt, ap->p_pfunc); |
2741 | ap->p_adjust = adjust; | |
2742 | taskq_dispatch(arc_prune_taskq, arc_prune_task, ap, TQ_SLEEP); | |
2743 | ARCSTAT_BUMP(arcstat_prune); | |
ab26409d | 2744 | } |
ab26409d BB |
2745 | mutex_exit(&arc_prune_mtx); |
2746 | } | |
2747 | ||
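The refcount test above doubles as a dispatch guard: the registration itself holds one reference, so a count of two or more means a prune task is already in flight. A rough userspace sketch of that guard, with a plain counter in place of refcount_t and the task run inline (all names hypothetical):

#include <stdint.h>

typedef struct prune {
	int	p_refcnt;	/* 1 while registered */
	int64_t	p_adjust;
} prune_t;

/* Hypothetical stand-in for taskq_dispatch() of arc_prune_task(). */
static void
run_prune_task(prune_t *ap)
{
	/* ... invoke the registered callback with ap->p_adjust ... */
	ap->p_refcnt--;		/* drop the reference taken at dispatch */
}

static void
prune_async(prune_t *ap, int64_t adjust)
{
	if (ap->p_refcnt >= 2)
		return;		/* a prior prune task hasn't finished */
	ap->p_refcnt++;		/* released by the task when done */
	ap->p_adjust = adjust;
	run_prune_task(ap);
}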
f6046738 BB |
2748 | static void |
2749 | arc_prune(int64_t adjust) | |
2750 | { | |
2751 | arc_prune_async(adjust); | |
2752 | taskq_wait_outstanding(arc_prune_taskq, 0); | |
2753 | } | |
2754 | ||
ca0bf58d PS |
2755 | /* |
2756 | * Evict the specified number of bytes from the state specified, | |
2757 | * restricting eviction to the spa and type given. This function | |
2758 | * prevents us from trying to evict more from a state's list than | |
2759 | * is "evictable", and to skip evicting altogether when passed a | |
2760 | * negative value for "bytes". In contrast, arc_evict_state() will | |
2761 | * evict everything it can, when passed a negative value for "bytes". | |
2762 | */ | |
2763 | static uint64_t | |
2764 | arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, | |
2765 | arc_buf_contents_t type) | |
2766 | { | |
2767 | int64_t delta; | |
2768 | ||
2769 | if (bytes > 0 && state->arcs_lsize[type] > 0) { | |
2770 | delta = MIN(state->arcs_lsize[type], bytes); | |
2771 | return (arc_evict_state(state, spa, delta, type)); | |
2772 | } | |
2773 | ||
2774 | return (0); | |
2775 | } | |
2776 | ||
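Stripped of the ARC types, the rule above is just "evict MIN(evictable, requested), and do nothing for non-positive requests"; a tiny sketch (hypothetical name):

#include <stdint.h>

static int64_t
clamp_eviction(int64_t requested, int64_t evictable)
{
	if (requested > 0 && evictable > 0)
		return (requested < evictable ? requested : evictable);
	return (0);	/* nothing to do for zero/negative requests */
}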
2777 | /* | |
2778 | * The goal of this function is to evict enough meta data buffers from the | |
2779 | * ARC in order to enforce the arc_meta_limit. Achieving this is slightly | |
2780 | * more complicated than it appears because it is common for data buffers | |
2781 | * to have holds on meta data buffers. In addition, dnode meta data buffers | |
2782 | * will be held by the dnodes in the block preventing them from being freed. | |
2783 | * This means we can't simply traverse the ARC and expect to always find | |
2784 | * enough unheld meta data buffers to release. | |
2785 | * | |
2786 | * Therefore, this function has been updated to make alternating passes | |
2787 | * over the ARC releasing data buffers and then newly unheld meta data | |
2788 | * buffers. This ensures forward progress is maintained and arc_meta_used | |
2789 | * will decrease. Normally this is sufficient, but if required the ARC | |
2790 | * will call the registered prune callbacks causing dentry and inodes to | |
2791 | * be dropped from the VFS cache. This will make dnode meta data buffers | |
2792 | * available for reclaim. | |
2793 | */ | |
2794 | static uint64_t | |
f6046738 | 2795 | arc_adjust_meta_balanced(void) |
ca0bf58d PS |
2796 | { |
2797 | int64_t adjustmnt, delta, prune = 0; | |
2798 | uint64_t total_evicted = 0; | |
2799 | arc_buf_contents_t type = ARC_BUFC_DATA; | |
ca67b33a | 2800 | int restarts = MAX(zfs_arc_meta_adjust_restarts, 0); |
ca0bf58d PS |
2801 | |
2802 | restart: | |
2803 | /* | |
2804 | * This differs slightly from the way we evict from the mru in | |
2805 | * arc_adjust because we don't have a "target" value (i.e. no | |
2806 | * "meta" arc_p). As a result, I think we can completely | |
2807 | * cannibalize the metadata in the MRU before we evict the | |
2808 | * metadata from the MFU. I think we probably need to implement a | |
2809 | * "metadata arc_p" value to do this properly. | |
2810 | */ | |
2811 | adjustmnt = arc_meta_used - arc_meta_limit; | |
2812 | ||
2813 | if (adjustmnt > 0 && arc_mru->arcs_lsize[type] > 0) { | |
2814 | delta = MIN(arc_mru->arcs_lsize[type], adjustmnt); | |
2815 | total_evicted += arc_adjust_impl(arc_mru, 0, delta, type); | |
2816 | adjustmnt -= delta; | |
2817 | } | |
2818 | ||
2819 | /* | |
2820 | * We can't afford to recalculate adjustmnt here. If we do, | |
2821 | * new metadata buffers can sneak into the MRU or ANON lists, | |
2822 | * thus penalizing the MFU metadata. Although the fudge factor is | |
2823 | * small, it has been empirically shown to be significant for | |
2824 | * certain workloads (e.g. creating many empty directories). As | |
2825 | * such, we use the original calculation for adjustmnt, and | |
2826 | * simply decrement the amount of data evicted from the MRU. | |
2827 | */ | |
2828 | ||
2829 | if (adjustmnt > 0 && arc_mfu->arcs_lsize[type] > 0) { | |
2830 | delta = MIN(arc_mfu->arcs_lsize[type], adjustmnt); | |
2831 | total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type); | |
2832 | } | |
2833 | ||
2834 | adjustmnt = arc_meta_used - arc_meta_limit; | |
2835 | ||
2836 | if (adjustmnt > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { | |
2837 | delta = MIN(adjustmnt, | |
2838 | arc_mru_ghost->arcs_lsize[type]); | |
2839 | total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type); | |
2840 | adjustmnt -= delta; | |
2841 | } | |
2842 | ||
2843 | if (adjustmnt > 0 && arc_mfu_ghost->arcs_lsize[type] > 0) { | |
2844 | delta = MIN(adjustmnt, | |
2845 | arc_mfu_ghost->arcs_lsize[type]); | |
2846 | total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type); | |
2847 | } | |
2848 | ||
2849 | /* | |
2850 | * If after attempting to make the requested adjustment to the ARC | |
2851 | * the meta limit is still being exceeded then request that the | |
2852 | * higher layers drop some cached objects which have holds on ARC | |
2853 | * meta buffers. Requests to the upper layers will be made with | |
2854 | * increasingly large scan sizes until the ARC is below the limit. | |
2855 | */ | |
2856 | if (arc_meta_used > arc_meta_limit) { | |
2857 | if (type == ARC_BUFC_DATA) { | |
2858 | type = ARC_BUFC_METADATA; | |
2859 | } else { | |
2860 | type = ARC_BUFC_DATA; | |
2861 | ||
2862 | if (zfs_arc_meta_prune) { | |
2863 | prune += zfs_arc_meta_prune; | |
f6046738 | 2864 | arc_prune_async(prune); |
ca0bf58d PS |
2865 | } |
2866 | } | |
2867 | ||
2868 | if (restarts > 0) { | |
2869 | restarts--; | |
2870 | goto restart; | |
2871 | } | |
2872 | } | |
2873 | return (total_evicted); | |
2874 | } | |
2875 | ||
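A compressed userspace sketch of the alternating-pass idea: evict one buffer type, flip to the other so newly unheld metadata becomes eligible, and restart a bounded number of times. All names are hypothetical stand-ins, and the ghost-list passes and prune callbacks of the real function are elided.

#include <stdint.h>

enum buf_type { T_DATA, T_METADATA };

static uint64_t meta_used, meta_limit;

/* Hypothetical stand-in for arc_adjust_impl() on a given type. */
static uint64_t
evict_type(enum buf_type t, uint64_t want)
{
	uint64_t got = want / 2 + 1;	/* pretend some buffers were held */

	(void) t;
	if (got > meta_used)
		got = meta_used;
	meta_used -= got;
	return (got);
}

static uint64_t
adjust_meta_balanced(int restarts)
{
	uint64_t evicted = 0;
	enum buf_type t = T_DATA;

	while (meta_used > meta_limit && restarts-- > 0) {
		evicted += evict_type(t, meta_used - meta_limit);
		/* alternate types so held metadata can become free */
		t = (t == T_DATA) ? T_METADATA : T_DATA;
	}
	return (evicted);
}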
f6046738 BB |
2876 | /* |
2877 | * Evict metadata buffers from the cache, such that arc_meta_used is | |
2878 | * capped by the arc_meta_limit tunable. | |
2879 | */ | |
2880 | static uint64_t | |
2881 | arc_adjust_meta_only(void) | |
2882 | { | |
2883 | uint64_t total_evicted = 0; | |
2884 | int64_t target; | |
2885 | ||
2886 | /* | |
2887 | * If we're over the meta limit, we want to evict enough | |
2888 | * metadata to get back under the meta limit. We don't want to | |
2889 | * evict so much that we drop the MRU below arc_p, though. If | |
2890 | * we're over the meta limit more than we're over arc_p, we | |
2891 | * evict some from the MRU here, and some from the MFU below. | |
2892 | */ | |
2893 | target = MIN((int64_t)(arc_meta_used - arc_meta_limit), | |
36da08ef PS |
2894 | (int64_t)(refcount_count(&arc_anon->arcs_size) + |
2895 | refcount_count(&arc_mru->arcs_size) - arc_p)); | |
f6046738 BB |
2896 | |
2897 | total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); | |
2898 | ||
2899 | /* | |
2900 | * Similar to the above, we want to evict enough bytes to get us | |
2901 | * below the meta limit, but not so much as to drop us below the | |
2902 | * space allotted to the MFU (which is defined as arc_c - arc_p). | |
2903 | */ | |
2904 | target = MIN((int64_t)(arc_meta_used - arc_meta_limit), | |
36da08ef | 2905 | (int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p))); |
f6046738 BB |
2906 | |
2907 | total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); | |
2908 | ||
2909 | return (total_evicted); | |
2910 | } | |
2911 | ||
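The two targets above reduce to min() expressions over list sizes; a sketch of just that arithmetic, with hypothetical names and all sizes in bytes:

#include <stdint.h>

static int64_t
min64(int64_t a, int64_t b)
{
	return (a < b ? a : b);
}

static void
meta_targets(int64_t meta_over, int64_t anon, int64_t mru, int64_t mfu,
    int64_t c, int64_t p, int64_t *mru_target, int64_t *mfu_target)
{
	/* evict from the MRU, but never below arc_p ... */
	*mru_target = min64(meta_over, anon + mru - p);
	/* ... and from the MFU, but never below arc_c - arc_p */
	*mfu_target = min64(meta_over, mfu - (c - p));
}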
2912 | static uint64_t | |
2913 | arc_adjust_meta(void) | |
2914 | { | |
2915 | if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY) | |
2916 | return (arc_adjust_meta_only()); | |
2917 | else | |
2918 | return (arc_adjust_meta_balanced()); | |
2919 | } | |
2920 | ||
ca0bf58d PS |
2921 | /* |
2922 | * Return the type of the oldest buffer in the given arc state | |
2923 | * | |
2924 | * This function will select a random sublist of type ARC_BUFC_DATA and | |
2925 | * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist | |
2926 | * is compared, and the type which contains the "older" buffer will be | |
2927 | * returned. | |
2928 | */ | |
2929 | static arc_buf_contents_t | |
2930 | arc_adjust_type(arc_state_t *state) | |
2931 | { | |
2932 | multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA]; | |
2933 | multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA]; | |
2934 | int data_idx = multilist_get_random_index(data_ml); | |
2935 | int meta_idx = multilist_get_random_index(meta_ml); | |
2936 | multilist_sublist_t *data_mls; | |
2937 | multilist_sublist_t *meta_mls; | |
2938 | arc_buf_contents_t type; | |
2939 | arc_buf_hdr_t *data_hdr; | |
2940 | arc_buf_hdr_t *meta_hdr; | |
2941 | ||
2942 | /* | |
2943 | * We keep the sublist lock until we're finished, to prevent | |
2944 | * the headers from being destroyed via arc_evict_state(). | |
2945 | */ | |
2946 | data_mls = multilist_sublist_lock(data_ml, data_idx); | |
2947 | meta_mls = multilist_sublist_lock(meta_ml, meta_idx); | |
2948 | ||
2949 | /* | |
2950 | * These two loops are to ensure we skip any markers that | |
2951 | * might be at the tail of the lists due to arc_evict_state(). | |
2952 | */ | |
2953 | ||
2954 | for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; | |
2955 | data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { | |
2956 | if (data_hdr->b_spa != 0) | |
2957 | break; | |
2958 | } | |
2959 | ||
2960 | for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; | |
2961 | meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { | |
2962 | if (meta_hdr->b_spa != 0) | |
2963 | break; | |
2964 | } | |
2965 | ||
2966 | if (data_hdr == NULL && meta_hdr == NULL) { | |
2967 | type = ARC_BUFC_DATA; | |
2968 | } else if (data_hdr == NULL) { | |
2969 | ASSERT3P(meta_hdr, !=, NULL); | |
2970 | type = ARC_BUFC_METADATA; | |
2971 | } else if (meta_hdr == NULL) { | |
2972 | ASSERT3P(data_hdr, !=, NULL); | |
2973 | type = ARC_BUFC_DATA; | |
2974 | } else { | |
2975 | ASSERT3P(data_hdr, !=, NULL); | |
2976 | ASSERT3P(meta_hdr, !=, NULL); | |
2977 | ||
2978 | /* The headers can't be on the sublist without an L1 header */ | |
2979 | ASSERT(HDR_HAS_L1HDR(data_hdr)); | |
2980 | ASSERT(HDR_HAS_L1HDR(meta_hdr)); | |
2981 | ||
2982 | if (data_hdr->b_l1hdr.b_arc_access < | |
2983 | meta_hdr->b_l1hdr.b_arc_access) { | |
2984 | type = ARC_BUFC_DATA; | |
2985 | } else { | |
2986 | type = ARC_BUFC_METADATA; | |
2987 | } | |
2988 | } | |
2989 | ||
2990 | multilist_sublist_unlock(meta_mls); | |
2991 | multilist_sublist_unlock(data_mls); | |
2992 | ||
2993 | return (type); | |
2994 | } | |
2995 | ||
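The comparison itself is small once the marker-skipping is done: the tail with the smaller access stamp is older, with NULL tails falling back as in the cases above. A sketch with hypothetical names:

#include <stddef.h>
#include <stdint.h>

typedef struct tail_hdr {
	uint64_t	arc_access;	/* lbolt of last access */
} tail_hdr_t;

enum buf_type { T_DATA, T_METADATA };

static enum buf_type
older_type(const tail_hdr_t *data_tail, const tail_hdr_t *meta_tail)
{
	if (data_tail == NULL)		/* covers the both-NULL default */
		return (meta_tail == NULL ? T_DATA : T_METADATA);
	if (meta_tail == NULL)
		return (T_DATA);
	/* a smaller access stamp means an older buffer */
	return (data_tail->arc_access < meta_tail->arc_access ?
	    T_DATA : T_METADATA);
}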
2996 | /* | |
2997 | * Evict buffers from the cache, such that arc_size is capped by arc_c. | |
2998 | */ | |
2999 | static uint64_t | |
3000 | arc_adjust(void) | |
3001 | { | |
3002 | uint64_t total_evicted = 0; | |
3003 | uint64_t bytes; | |
3004 | int64_t target; | |
3005 | ||
3006 | /* | |
3007 | * If we're over arc_meta_limit, we want to correct that before | |
3008 | * potentially evicting data buffers below. | |
3009 | */ | |
3010 | total_evicted += arc_adjust_meta(); | |
3011 | ||
3012 | /* | |
3013 | * Adjust MRU size | |
3014 | * | |
3015 | * If we're over the target cache size, we want to evict enough | |
3016 | * from the list to get back to our target size. We don't want | |
3017 | * to evict too much from the MRU, such that it drops below | |
3018 | * arc_p. So, if we're over our target cache size more than | |
3019 | * the MRU is over arc_p, we'll evict enough to get back to | |
3020 | * arc_p here, and then evict more from the MFU below. | |
3021 | */ | |
3022 | target = MIN((int64_t)(arc_size - arc_c), | |
36da08ef PS |
3023 | (int64_t)(refcount_count(&arc_anon->arcs_size) + |
3024 | refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p)); | |
ca0bf58d PS |
3025 | |
3026 | /* | |
3027 | * If we're below arc_meta_min, always prefer to evict data. | |
3028 | * Otherwise, try to satisfy the requested number of bytes to | |
3029 | * evict from the type which contains older buffers, in an | |
3030 | * effort to keep newer buffers in the cache regardless of their | |
3031 | * type. If we cannot satisfy the number of bytes from this | |
3032 | * type, spill over into the next type. | |
3033 | */ | |
3034 | if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA && | |
3035 | arc_meta_used > arc_meta_min) { | |
3036 | bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); | |
3037 | total_evicted += bytes; | |
3038 | ||
3039 | /* | |
3040 | * If we couldn't evict our target number of bytes from | |
3041 | * metadata, we try to get the rest from data. | |
3042 | */ | |
3043 | target -= bytes; | |
3044 | ||
3045 | total_evicted += | |
3046 | arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); | |
3047 | } else { | |
3048 | bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); | |
3049 | total_evicted += bytes; | |
3050 | ||
3051 | /* | |
3052 | * If we couldn't evict our target number of bytes from | |
3053 | * data, we try to get the rest from metadata. | |
3054 | */ | |
3055 | target -= bytes; | |
3056 | ||
3057 | total_evicted += | |
3058 | arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); | |
3059 | } | |
3060 | ||
3061 | /* | |
3062 | * Adjust MFU size | |
3063 | * | |
3064 | * Now that we've tried to evict enough from the MRU to get its | |
3065 | * size back to arc_p, if we're still above the target cache | |
3066 | * size, we evict the rest from the MFU. | |
3067 | */ | |
3068 | target = arc_size - arc_c; | |
3069 | ||
a7b10a93 | 3070 | if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA && |
ca0bf58d PS |
3071 | arc_meta_used > arc_meta_min) { |
3072 | bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); | |
3073 | total_evicted += bytes; | |
3074 | ||
3075 | /* | |
3076 | * If we couldn't evict our target number of bytes from | |
3077 | * metadata, we try to get the rest from data. | |
3078 | */ | |
3079 | target -= bytes; | |
3080 | ||
3081 | total_evicted += | |
3082 | arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); | |
3083 | } else { | |
3084 | bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); | |
3085 | total_evicted += bytes; | |
3086 | ||
3087 | /* | |
3088 | * If we couldn't evict our target number of bytes from | |
3089 | * data, we try to get the rest from metadata. | |
3090 | */ | |
3091 | target -= bytes; | |
3092 | ||
3093 | total_evicted += | |
3094 | arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); | |
3095 | } | |
3096 | ||
3097 | /* | |
3098 | * Adjust ghost lists | |
3099 | * | |
3100 | * In addition to the above, the ARC also defines target values | |
3101 | * for the ghost lists. The sum of the mru list and mru ghost | |
3102 | * list should never exceed the target size of the cache, and | |
3103 | * the sum of the mru list, mfu list, mru ghost list, and mfu | |
3104 | * ghost list should never exceed twice the target size of the | |
3105 | * cache. The following logic enforces these limits on the ghost | |
3106 | * caches, and evicts from them as needed. | |
3107 | */ | |
36da08ef PS |
3108 | target = refcount_count(&arc_mru->arcs_size) + |
3109 | refcount_count(&arc_mru_ghost->arcs_size) - arc_c; | |
ca0bf58d PS |
3110 | |
3111 | bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); | |
3112 | total_evicted += bytes; | |
3113 | ||
3114 | target -= bytes; | |
3115 | ||
3116 | total_evicted += | |
3117 | arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); | |
3118 | ||
3119 | /* | |
3120 | * We assume the sum of the mru list and mfu list is less than | |
3121 | * or equal to arc_c (we enforced this above), which means we | |
3122 | * can use the simpler of the two equations below: | |
3123 | * | |
3124 | * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c | |
3125 | * mru ghost + mfu ghost <= arc_c | |
3126 | */ | |
36da08ef PS |
3127 | target = refcount_count(&arc_mru_ghost->arcs_size) + |
3128 | refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; | |
ca0bf58d PS |
3129 | |
3130 | bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); | |
3131 | total_evicted += bytes; | |
3132 | ||
3133 | target -= bytes; | |
3134 | ||
3135 | total_evicted += | |
3136 | arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); | |
3137 | ||
3138 | return (total_evicted); | |
3139 | } | |
3140 | ||
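The two ghost targets fall directly out of the stated invariants (mru + mru ghost <= c and, given the first, mru ghost + mfu ghost <= c); as a sketch of the arithmetic (hypothetical names, sizes in bytes):

#include <stdint.h>

static void
ghost_targets(int64_t mru, int64_t mru_ghost, int64_t mfu_ghost, int64_t c,
    int64_t *mru_ghost_target, int64_t *mfu_ghost_target)
{
	/* a non-positive target means the invariant already holds */
	*mru_ghost_target = mru + mru_ghost - c;
	*mfu_ghost_target = mru_ghost + mfu_ghost - c;
}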
34dc7c2f BB |
3141 | static void |
3142 | arc_do_user_evicts(void) | |
3143 | { | |
ca0bf58d | 3144 | mutex_enter(&arc_user_evicts_lock); |
34dc7c2f BB |
3145 | while (arc_eviction_list != NULL) { |
3146 | arc_buf_t *buf = arc_eviction_list; | |
3147 | arc_eviction_list = buf->b_next; | |
428870ff | 3148 | mutex_enter(&buf->b_evict_lock); |
34dc7c2f | 3149 | buf->b_hdr = NULL; |
428870ff | 3150 | mutex_exit(&buf->b_evict_lock); |
ca0bf58d | 3151 | mutex_exit(&arc_user_evicts_lock); |
34dc7c2f BB |
3152 | |
3153 | if (buf->b_efunc != NULL) | |
bd089c54 | 3154 | VERIFY0(buf->b_efunc(buf->b_private)); |
34dc7c2f BB |
3155 | |
3156 | buf->b_efunc = NULL; | |
3157 | buf->b_private = NULL; | |
3158 | kmem_cache_free(buf_cache, buf); | |
ca0bf58d | 3159 | mutex_enter(&arc_user_evicts_lock); |
34dc7c2f | 3160 | } |
ca0bf58d | 3161 | mutex_exit(&arc_user_evicts_lock); |
34dc7c2f BB |
3162 | } |
3163 | ||
ca0bf58d PS |
3164 | void |
3165 | arc_flush(spa_t *spa, boolean_t retry) | |
ab26409d | 3166 | { |
ca0bf58d | 3167 | uint64_t guid = 0; |
94520ca4 | 3168 | |
bc888666 | 3169 | /* |
ca0bf58d PS |
3170 | * If retry is TRUE, a spa must not be specified since we have |
3171 | * no good way to determine if all of a spa's buffers have been | |
3172 | * evicted from an arc state. | |
bc888666 | 3173 | */ |
ca0bf58d | 3174 | ASSERT(!retry || spa == 0); |
d164b209 | 3175 | |
b9541d6b | 3176 | if (spa != NULL) |
3541dc6d | 3177 | guid = spa_load_guid(spa); |
d164b209 | 3178 | |
ca0bf58d PS |
3179 | (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); |
3180 | (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); | |
3181 | ||
3182 | (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); | |
3183 | (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); | |
3184 | ||
3185 | (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); | |
3186 | (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); | |
34dc7c2f | 3187 | |
ca0bf58d PS |
3188 | (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); |
3189 | (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); | |
34dc7c2f | 3190 | |
34dc7c2f | 3191 | arc_do_user_evicts(); |
34dc7c2f BB |
3192 | ASSERT(spa || arc_eviction_list == NULL); |
3193 | } | |
3194 | ||
34dc7c2f | 3195 | void |
ca67b33a | 3196 | arc_shrink(int64_t to_free) |
34dc7c2f BB |
3197 | { |
3198 | if (arc_c > arc_c_min) { | |
302f753f | 3199 | |
34dc7c2f BB |
3200 | if (arc_c > arc_c_min + to_free) |
3201 | atomic_add_64(&arc_c, -to_free); | |
3202 | else | |
3203 | arc_c = arc_c_min; | |
3204 | ||
ca67b33a | 3205 | atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); |
34dc7c2f BB |
3206 | if (arc_c > arc_size) |
3207 | arc_c = MAX(arc_size, arc_c_min); | |
3208 | if (arc_p > arc_c) | |
3209 | arc_p = (arc_c >> 1); | |
3210 | ASSERT(arc_c >= arc_c_min); | |
3211 | ASSERT((int64_t)arc_p >= 0); | |
3212 | } | |
3213 | ||
3214 | if (arc_size > arc_c) | |
ca0bf58d | 3215 | (void) arc_adjust(); |
34dc7c2f BB |
3216 | } |
3217 | ||
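A userspace sketch of the shrink arithmetic above, with globals standing in for arc_c, arc_c_min, arc_p and arc_size (the names and shift value are hypothetical):

#include <stdint.h>

#define	SHRINK_SHIFT	7	/* stand-in for arc_shrink_shift */

static uint64_t c, c_min, p, size;	/* stand-ins for arc_c etc. */

static void
shrink(uint64_t to_free)
{
	if (c > c_min) {
		c = (c > c_min + to_free) ? c - to_free : c_min;
		p -= p >> SHRINK_SHIFT;
		if (c > size)		/* don't hold c above current usage */
			c = (size > c_min) ? size : c_min;
		if (p > c)
			p = c >> 1;
	}
	/* the real code then calls arc_adjust() if size > c */
}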
ca67b33a MA |
3218 | typedef enum free_memory_reason_t { |
3219 | FMR_UNKNOWN, | |
3220 | FMR_NEEDFREE, | |
3221 | FMR_LOTSFREE, | |
3222 | FMR_SWAPFS_MINFREE, | |
3223 | FMR_PAGES_PP_MAXIMUM, | |
3224 | FMR_HEAP_ARENA, | |
3225 | FMR_ZIO_ARENA, | |
3226 | } free_memory_reason_t; | |
3227 | ||
3228 | int64_t last_free_memory; | |
3229 | free_memory_reason_t last_free_reason; | |
3230 | ||
3231 | #ifdef _KERNEL | |
ca67b33a MA |
3232 | /* |
3233 | * Additional reserve of pages for pp_reserve. | |
3234 | */ | |
3235 | int64_t arc_pages_pp_reserve = 64; | |
3236 | ||
3237 | /* | |
3238 | * Additional reserve of pages for swapfs. | |
3239 | */ | |
3240 | int64_t arc_swapfs_reserve = 64; | |
ca67b33a MA |
3241 | #endif /* _KERNEL */ |
3242 | ||
3243 | /* | |
3244 | * Return the amount of memory that can be consumed before reclaim will be | |
3245 | * needed. Positive if there is sufficient free memory; negative indicates | |
3246 | * the amount of memory that needs to be freed up. | |
3247 | */ | |
3248 | static int64_t | |
3249 | arc_available_memory(void) | |
3250 | { | |
3251 | int64_t lowest = INT64_MAX; | |
3252 | free_memory_reason_t r = FMR_UNKNOWN; | |
ca67b33a | 3253 | #ifdef _KERNEL |
ca67b33a | 3254 | int64_t n; |
11f552fa BB |
3255 | #ifdef __linux__ |
3256 | pgcnt_t needfree = btop(arc_need_free); | |
3257 | pgcnt_t lotsfree = btop(arc_sys_free); | |
3258 | pgcnt_t desfree = 0; | |
3259 | #endif | |
ca67b33a | 3260 | |
ca67b33a MA |
3261 | if (needfree > 0) { |
3262 | n = PAGESIZE * (-needfree); | |
3263 | if (n < lowest) { | |
3264 | lowest = n; | |
3265 | r = FMR_NEEDFREE; | |
3266 | } | |
3267 | } | |
3268 | ||
3269 | /* | |
3270 | * check that we're out of range of the pageout scanner. It starts to | |
3271 | * schedule paging if freemem is less than lotsfree and needfree. | |
3272 | * lotsfree is the high-water mark for pageout, and needfree is the | |
3273 | * number of needed free pages. We add extra pages here to make sure | |
3274 | * the scanner doesn't start up while we're freeing memory. | |
3275 | */ | |
3276 | n = PAGESIZE * (freemem - lotsfree - needfree - desfree); | |
3277 | if (n < lowest) { | |
3278 | lowest = n; | |
3279 | r = FMR_LOTSFREE; | |
3280 | } | |
3281 | ||
11f552fa | 3282 | #ifndef __linux__ |
ca67b33a MA |
3283 | /* |
3284 | * check to make sure that swapfs has enough space so that anon | |
3285 | * reservations can still succeed. anon_resvmem() checks that the | |
3286 | * availrmem is greater than swapfs_minfree, and the number of reserved | |
3287 | * swap pages. We also add a bit of extra here just to prevent | |
3288 | * circumstances from getting really dire. | |
3289 | */ | |
3290 | n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - | |
3291 | desfree - arc_swapfs_reserve); | |
3292 | if (n < lowest) { | |
3293 | lowest = n; | |
3294 | r = FMR_SWAPFS_MINFREE; | |
3295 | } | |
3296 | ||
3297 | ||
3298 | /* | |
3299 | * Check that we have enough availrmem that memory locking (e.g., via | |
3300 | * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum | |
3301 | * stores the number of pages that cannot be locked; when availrmem | |
3302 | * drops below pages_pp_maximum, page locking mechanisms such as | |
3303 | * page_pp_lock() will fail.) | |
3304 | */ | |
3305 | n = PAGESIZE * (availrmem - pages_pp_maximum - | |
3306 | arc_pages_pp_reserve); | |
3307 | if (n < lowest) { | |
3308 | lowest = n; | |
3309 | r = FMR_PAGES_PP_MAXIMUM; | |
3310 | } | |
11f552fa | 3311 | #endif |
ca67b33a MA |
3312 | |
3313 | #if defined(__i386) | |
3314 | /* | |
3315 | * If we're on an i386 platform, it's possible that we'll exhaust the | |
3316 | * kernel heap space before we ever run out of available physical | |
3317 | * memory. Most checks of the size of the heap_area compare against | |
3318 | * tune.t_minarmem, which is the minimum available real memory that we | |
3319 | * can have in the system. However, this is generally fixed at 25 pages | |
3320 | * which is so low that it's useless. In this comparison, we seek to | |
3321 | * calculate the total heap-size, and reclaim if more than 3/4ths of the | |
3322 | * heap is allocated. (Or, in the calculation, if less than 1/4th is | |
3323 | * free) | |
3324 | */ | |
3325 | n = vmem_size(heap_arena, VMEM_FREE) - | |
3326 | (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); | |
3327 | if (n < lowest) { | |
3328 | lowest = n; | |
3329 | r = FMR_HEAP_ARENA; | |
3330 | } | |
3331 | #endif | |
3332 | ||
3333 | /* | |
3334 | * If zio data pages are being allocated out of a separate heap segment, | |
3335 | * then enforce that the size of available vmem for this arena remains | |
3336 | * above about 1/16th free. | |
3337 | * | |
3338 | * Note: The 1/16th arena free requirement was put in place | |
3339 | * to aggressively evict memory from the arc in order to avoid | |
3340 | * memory fragmentation issues. | |
3341 | */ | |
3342 | if (zio_arena != NULL) { | |
3343 | n = vmem_size(zio_arena, VMEM_FREE) - | |
3344 | (vmem_size(zio_arena, VMEM_ALLOC) >> 4); | |
3345 | if (n < lowest) { | |
3346 | lowest = n; | |
3347 | r = FMR_ZIO_ARENA; | |
3348 | } | |
3349 | } | |
11f552fa | 3350 | #else /* _KERNEL */ |
ca67b33a MA |
3351 | /* Every 100 calls, free a small amount */ |
3352 | if (spa_get_random(100) == 0) | |
3353 | lowest = -1024; | |
11f552fa | 3354 | #endif /* _KERNEL */ |
ca67b33a MA |
3355 | |
3356 | last_free_memory = lowest; | |
3357 | last_free_reason = r; | |
3358 | ||
3359 | return (lowest); | |
3360 | } | |
3361 | ||
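Every branch above follows one pattern: compute a signed headroom and remember the smallest one together with its reason. A minimal sketch of that "lowest watermark wins" accumulation with two checks (hypothetical names):

#include <stdint.h>

typedef enum { R_UNKNOWN, R_NEEDFREE, R_LOTSFREE } reason_t;

static int64_t
lowest_headroom(int64_t needfree_b, int64_t lotsfree_b, reason_t *rp)
{
	int64_t lowest = INT64_MAX;
	reason_t r = R_UNKNOWN;

	if (needfree_b < lowest) {
		lowest = needfree_b;
		r = R_NEEDFREE;
	}
	if (lotsfree_b < lowest) {
		lowest = lotsfree_b;
		r = R_LOTSFREE;
	}
	*rp = r;
	return (lowest);	/* negative means reclaim is needed */
}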
3362 | /* | |
3363 | * Determine if the system is under memory pressure and is asking | |
3364 | * to reclaim memory. A return value of TRUE indicates that the system | |
3365 | * is under memory pressure and that the arc should adjust accordingly. | |
3366 | */ | |
3367 | static boolean_t | |
3368 | arc_reclaim_needed(void) | |
3369 | { | |
3370 | return (arc_available_memory() < 0); | |
3371 | } | |
3372 | ||
34dc7c2f | 3373 | static void |
ca67b33a | 3374 | arc_kmem_reap_now(void) |
34dc7c2f BB |
3375 | { |
3376 | size_t i; | |
3377 | kmem_cache_t *prev_cache = NULL; | |
3378 | kmem_cache_t *prev_data_cache = NULL; | |
3379 | extern kmem_cache_t *zio_buf_cache[]; | |
3380 | extern kmem_cache_t *zio_data_buf_cache[]; | |
669dedb3 | 3381 | extern kmem_cache_t *range_seg_cache; |
34dc7c2f | 3382 | |
f6046738 BB |
3383 | if ((arc_meta_used >= arc_meta_limit) && zfs_arc_meta_prune) { |
3384 | /* | |
3385 | * We are exceeding our meta-data cache limit. | |
3386 | * Prune some entries to release holds on meta-data. | |
3387 | */ | |
3388 | arc_prune(zfs_arc_meta_prune); | |
3389 | } | |
3390 | ||
34dc7c2f BB |
3391 | for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { |
3392 | if (zio_buf_cache[i] != prev_cache) { | |
3393 | prev_cache = zio_buf_cache[i]; | |
3394 | kmem_cache_reap_now(zio_buf_cache[i]); | |
3395 | } | |
3396 | if (zio_data_buf_cache[i] != prev_data_cache) { | |
3397 | prev_data_cache = zio_data_buf_cache[i]; | |
3398 | kmem_cache_reap_now(zio_data_buf_cache[i]); | |
3399 | } | |
3400 | } | |
ca0bf58d | 3401 | kmem_cache_reap_now(buf_cache); |
b9541d6b CW |
3402 | kmem_cache_reap_now(hdr_full_cache); |
3403 | kmem_cache_reap_now(hdr_l2only_cache); | |
669dedb3 | 3404 | kmem_cache_reap_now(range_seg_cache); |
ca67b33a MA |
3405 | |
3406 | if (zio_arena != NULL) { | |
3407 | /* | |
3408 | * Ask the vmem arena to reclaim unused memory from its | |
3409 | * quantum caches. | |
3410 | */ | |
3411 | vmem_qcache_reap(zio_arena); | |
3412 | } | |
34dc7c2f BB |
3413 | } |
3414 | ||
302f753f | 3415 | /* |
ca0bf58d PS |
3416 | * Threads can block in arc_get_data_buf() waiting for this thread to evict |
3417 | * enough data and signal them to proceed. When this happens, the threads in | |
3418 | * arc_get_data_buf() are sleeping while holding the hash lock for their | |
3419 | * particular arc header. Thus, we must be careful to never sleep on a | |
3420 | * hash lock in this thread. This is to prevent the following deadlock: | |
3421 | * | |
3422 | * - Thread A sleeps on CV in arc_get_data_buf() holding hash lock "L", | |
3423 | * waiting for the reclaim thread to signal it. | |
3424 | * | |
3425 | * - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter, | |
3426 | * fails, and goes to sleep forever. | |
3427 | * | |
3428 | * This possible deadlock is avoided by always acquiring a hash lock | |
3429 | * using mutex_tryenter() from arc_reclaim_thread(). | |
302f753f | 3430 | */ |
34dc7c2f | 3431 | static void |
ca67b33a | 3432 | arc_reclaim_thread(void) |
34dc7c2f | 3433 | { |
ca67b33a MA |
3434 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
3435 | clock_t growtime = 0; | |
34dc7c2f BB |
3436 | callb_cpr_t cpr; |
3437 | ||
ca0bf58d | 3438 | CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG); |
34dc7c2f | 3439 | |
ca0bf58d | 3440 | mutex_enter(&arc_reclaim_lock); |
ca67b33a MA |
3441 | while (!arc_reclaim_thread_exit) { |
3442 | int64_t to_free; | |
3443 | int64_t free_memory = arc_available_memory(); | |
3444 | uint64_t evicted = 0; | |
302f753f | 3445 | |
ca67b33a | 3446 | arc_tuning_update(); |
34dc7c2f | 3447 | |
ca67b33a | 3448 | mutex_exit(&arc_reclaim_lock); |
34dc7c2f | 3449 | |
ca67b33a | 3450 | if (free_memory < 0) { |
34dc7c2f | 3451 | |
ca67b33a | 3452 | arc_no_grow = B_TRUE; |
b128c09f | 3453 | arc_warm = B_TRUE; |
34dc7c2f | 3454 | |
ca67b33a MA |
3455 | /* |
3456 | * Wait at least zfs_grow_retry (default 5) seconds | |
3457 | * before considering growing. | |
3458 | */ | |
3459 | growtime = ddi_get_lbolt() + (arc_grow_retry * hz); | |
6a8f9b6b | 3460 | |
ca67b33a | 3461 | arc_kmem_reap_now(); |
34dc7c2f | 3462 | |
ca67b33a MA |
3463 | /* |
3464 | * If we are still low on memory, shrink the ARC | |
3465 | * so that we have arc_shrink_min free space. | |
3466 | */ | |
3467 | free_memory = arc_available_memory(); | |
34dc7c2f | 3468 | |
ca67b33a MA |
3469 | to_free = (arc_c >> arc_shrink_shift) - free_memory; |
3470 | if (to_free > 0) { | |
3471 | #ifdef _KERNEL | |
11f552fa | 3472 | to_free = MAX(to_free, arc_need_free); |
ca67b33a MA |
3473 | #endif |
3474 | arc_shrink(to_free); | |
3475 | } | |
3476 | } else if (free_memory < arc_c >> arc_no_grow_shift) { | |
3477 | arc_no_grow = B_TRUE; | |
3478 | } else if (ddi_get_lbolt() >= growtime) { | |
3479 | arc_no_grow = B_FALSE; | |
3480 | } | |
bce45ec9 | 3481 | |
ca67b33a | 3482 | evicted = arc_adjust(); |
bce45ec9 | 3483 | |
ca67b33a | 3484 | mutex_enter(&arc_reclaim_lock); |
bce45ec9 | 3485 | |
ca67b33a MA |
3486 | /* |
3487 | * If evicted is zero, we couldn't evict anything via | |
3488 | * arc_adjust(). This could be due to hash lock | |
3489 | * collisions, but more likely due to the majority of | |
3490 | * arc buffers being unevictable. Therefore, even if | |
3491 | * arc_size is above arc_c, another pass is unlikely to | |
3492 | * be helpful and could potentially cause us to enter an | |
3493 | * infinite loop. | |
3494 | */ | |
3495 | if (arc_size <= arc_c || evicted == 0) { | |
3496 | /* | |
3497 | * We're either no longer overflowing, or we | |
3498 | * can't evict anything more, so we should wake | |
11f552fa BB |
3499 | * up any threads before we go to sleep and clear |
3500 | * arc_need_free since nothing more can be done. | |
ca67b33a MA |
3501 | */ |
3502 | cv_broadcast(&arc_reclaim_waiters_cv); | |
11f552fa | 3503 | arc_need_free = 0; |
bce45ec9 | 3504 | |
ca67b33a MA |
3505 | /* |
3506 | * Block until signaled, or after one second (we | |
3507 | * might need to perform arc_kmem_reap_now() | |
3508 | * even if we aren't being signalled) | |
3509 | */ | |
3510 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
3511 | (void) cv_timedwait_sig(&arc_reclaim_thread_cv, | |
3512 | &arc_reclaim_lock, ddi_get_lbolt() + hz); | |
3513 | CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock); | |
3514 | } | |
ca0bf58d | 3515 | } |
bce45ec9 | 3516 | |
ca67b33a | 3517 | arc_reclaim_thread_exit = FALSE; |
ca0bf58d PS |
3518 | cv_broadcast(&arc_reclaim_thread_cv); |
3519 | CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */ | |
3520 | spl_fstrans_unmark(cookie); | |
3521 | thread_exit(); | |
3522 | } | |
3523 | ||
3524 | static void | |
3525 | arc_user_evicts_thread(void) | |
3526 | { | |
ca67b33a | 3527 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
ca0bf58d | 3528 | callb_cpr_t cpr; |
bce45ec9 | 3529 | |
ca0bf58d | 3530 | CALLB_CPR_INIT(&cpr, &arc_user_evicts_lock, callb_generic_cpr, FTAG); |
bce45ec9 | 3531 | |
ca0bf58d PS |
3532 | mutex_enter(&arc_user_evicts_lock); |
3533 | while (!arc_user_evicts_thread_exit) { | |
3534 | mutex_exit(&arc_user_evicts_lock); | |
3535 | ||
3536 | arc_do_user_evicts(); | |
3537 | ||
3538 | /* | |
3539 | * This is necessary in order for the mdb ::arc dcmd to | |
3540 | * show up to date information. Since the ::arc command | |
3541 | * does not call the kstat's update function, without | |
3542 | * this call, the command may show stale stats for the | |
3543 | * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even | |
3544 | * with this change, the data might be up to 1 second | |
3545 | * out of date; but that should suffice. The arc_state_t | |
3546 | * structures can be queried directly if more accurate | |
3547 | * information is needed. | |
3548 | */ | |
3549 | if (arc_ksp != NULL) | |
3550 | arc_ksp->ks_update(arc_ksp, KSTAT_READ); | |
3551 | ||
3552 | mutex_enter(&arc_user_evicts_lock); | |
3553 | ||
3554 | /* | |
3555 | * Block until signaled, or after one second (we need to | |
3556 | * call the arc's kstat update function regularly). | |
3557 | */ | |
3558 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
b64ccd6c | 3559 | (void) cv_timedwait_sig(&arc_user_evicts_cv, |
ca0bf58d PS |
3560 | &arc_user_evicts_lock, ddi_get_lbolt() + hz); |
3561 | CALLB_CPR_SAFE_END(&cpr, &arc_user_evicts_lock); | |
34dc7c2f BB |
3562 | } |
3563 | ||
ca0bf58d PS |
3564 | arc_user_evicts_thread_exit = FALSE; |
3565 | cv_broadcast(&arc_user_evicts_cv); | |
3566 | CALLB_CPR_EXIT(&cpr); /* drops arc_user_evicts_lock */ | |
40d06e3c | 3567 | spl_fstrans_unmark(cookie); |
34dc7c2f BB |
3568 | thread_exit(); |
3569 | } | |
3570 | ||
7cb67b45 BB |
3571 | #ifdef _KERNEL |
3572 | /* | |
302f753f BB |
3573 | * Determine the amount of memory eligible for eviction contained in the |
3574 | * ARC. All clean data reported by the ghost lists can always be safely | |
3575 | * evicted. Due to arc_c_min, the same does not hold for all clean data | |
3576 | * contained by the regular mru and mfu lists. | |
3577 | * | |
3578 | * In the case of the regular mru and mfu lists, we need to report as | |
3579 | * much clean data as possible, such that evicting that same reported | |
3580 | * data will not bring arc_size below arc_c_min. Thus, in certain | |
3581 | * circumstances, the total amount of clean data in the mru and mfu | |
3582 | * lists might not actually be evictable. | |
3583 | * | |
3584 | * The following two distinct cases are accounted for: | |
3585 | * | |
3586 | * 1. The sum of the amount of dirty data contained by both the mru and | |
3587 | * mfu lists, plus the ARC's other accounting (e.g. the anon list), | |
3588 | * is greater than or equal to arc_c_min. | |
3589 | * (i.e. amount of dirty data >= arc_c_min) | |
3590 | * | |
3591 | * This is the easy case; all clean data contained by the mru and mfu | |
3592 | * lists is evictable. Evicting all clean data can only drop arc_size | |
3593 | * to the amount of dirty data, which is greater than arc_c_min. | |
3594 | * | |
3595 | * 2. The sum of the amount of dirty data contained by both the mru and | |
3596 | * mfu lists, plus the ARC's other accounting (e.g. the anon list), | |
3597 | * is less than arc_c_min. | |
3598 | * (i.e. arc_c_min > amount of dirty data) | |
3599 | * | |
3600 | * 2.1. arc_size is greater than or equal arc_c_min. | |
3601 | * (i.e. arc_size >= arc_c_min > amount of dirty data) | |
3602 | * | |
3603 | * In this case, not all clean data from the regular mru and mfu | |
3604 | * lists is actually evictable; we must leave enough clean data | |
3605 | * to keep arc_size above arc_c_min. Thus, the maximum amount of | |
3606 | * evictable data from the two lists combined, is exactly the | |
3607 | * difference between arc_size and arc_c_min. | |
3608 | * | |
3609 | * 2.2. arc_size is less than arc_c_min | |
3610 | * (i.e. arc_c_min > arc_size > amount of dirty data) | |
3611 | * | |
3612 | * In this case, none of the data contained in the mru and mfu | |
3613 | * lists is evictable, even if it's clean. Since arc_size is | |
3614 | * already below arc_c_min, evicting any more would only | |
3615 | * increase this negative difference. | |
7cb67b45 | 3616 | */ |
302f753f BB |
3617 | static uint64_t |
3618 | arc_evictable_memory(void) { | |
3619 | uint64_t arc_clean = | |
3620 | arc_mru->arcs_lsize[ARC_BUFC_DATA] + | |
3621 | arc_mru->arcs_lsize[ARC_BUFC_METADATA] + | |
3622 | arc_mfu->arcs_lsize[ARC_BUFC_DATA] + | |
3623 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; | |
3624 | uint64_t ghost_clean = | |
3625 | arc_mru_ghost->arcs_lsize[ARC_BUFC_DATA] + | |
3626 | arc_mru_ghost->arcs_lsize[ARC_BUFC_METADATA] + | |
3627 | arc_mfu_ghost->arcs_lsize[ARC_BUFC_DATA] + | |
3628 | arc_mfu_ghost->arcs_lsize[ARC_BUFC_METADATA]; | |
3629 | uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0); | |
3630 | ||
3631 | if (arc_dirty >= arc_c_min) | |
3632 | return (ghost_clean + arc_clean); | |
3633 | ||
3634 | return (ghost_clean + MAX((int64_t)arc_size - (int64_t)arc_c_min, 0)); | |
3635 | } | |
3636 | ||
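The two documented cases compress to a few lines of arithmetic; a sketch with hypothetical names, all sizes in bytes:

#include <stdint.h>

static uint64_t
evictable(uint64_t size, uint64_t c_min, uint64_t clean, uint64_t ghost_clean)
{
	uint64_t dirty = (size > clean) ? size - clean : 0;

	if (dirty >= c_min)	/* case 1: all clean data is evictable */
		return (ghost_clean + clean);
	/* case 2: only the portion that keeps size above c_min */
	return (ghost_clean + ((size > c_min) ? size - c_min : 0));
}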
ed6e9cc2 TC |
3637 | /* |
3638 | * If sc->nr_to_scan is zero, the caller is requesting a query of the | |
3639 | * number of objects which can potentially be freed. If it is nonzero, | |
3640 | * the request is to free that many objects. | |
3641 | * | |
3642 | * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks | |
3643 | * in struct shrinker and also require the shrinker to return the number | |
3644 | * of objects freed. | |
3645 | * | |
3646 | * Older kernels require the shrinker to return the number of freeable | |
3647 | * objects following the freeing of nr_to_free. | |
3648 | */ | |
3649 | static spl_shrinker_t | |
7e7baeca | 3650 | __arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc) |
7cb67b45 | 3651 | { |
ed6e9cc2 | 3652 | int64_t pages; |
7cb67b45 | 3653 | |
302f753f BB |
3654 | /* The arc is considered warm once reclaim has occurred */ |
3655 | if (unlikely(arc_warm == B_FALSE)) | |
3656 | arc_warm = B_TRUE; | |
7cb67b45 | 3657 | |
302f753f | 3658 | /* Return the potential number of reclaimable pages */ |
ed6e9cc2 | 3659 | pages = btop((int64_t)arc_evictable_memory()); |
302f753f BB |
3660 | if (sc->nr_to_scan == 0) |
3661 | return (pages); | |
3fd70ee6 BB |
3662 | |
3663 | /* Not allowed to perform filesystem reclaim */ | |
7e7baeca | 3664 | if (!(sc->gfp_mask & __GFP_FS)) |
ed6e9cc2 | 3665 | return (SHRINK_STOP); |
3fd70ee6 | 3666 | |
7cb67b45 | 3667 | /* Reclaim in progress */ |
ca0bf58d | 3668 | if (mutex_tryenter(&arc_reclaim_lock) == 0) |
ed6e9cc2 | 3669 | return (SHRINK_STOP); |
7cb67b45 | 3670 | |
ca0bf58d PS |
3671 | mutex_exit(&arc_reclaim_lock); |
3672 | ||
302f753f BB |
3673 | /* |
3674 | * Evict the requested number of pages by shrinking arc_c the | |
3675 | * requested amount. If there is nothing left to evict just | |
3676 | * reap whatever we can from the various arc slabs. | |
3677 | */ | |
3678 | if (pages > 0) { | |
ca67b33a MA |
3679 | arc_shrink(ptob(sc->nr_to_scan)); |
3680 | arc_kmem_reap_now(); | |
ed6e9cc2 TC |
3681 | #ifdef HAVE_SPLIT_SHRINKER_CALLBACK |
3682 | pages = MAX(pages - btop(arc_evictable_memory()), 0); | |
3683 | #else | |
1e3cb67b | 3684 | pages = btop(arc_evictable_memory()); |
ed6e9cc2 | 3685 | #endif |
302f753f | 3686 | } else { |
ca67b33a | 3687 | arc_kmem_reap_now(); |
ed6e9cc2 | 3688 | pages = SHRINK_STOP; |
302f753f BB |
3689 | } |
3690 | ||
ca0bf58d PS |
3691 | /* |
3692 | * We've reaped what we can, wake up threads. | |
3693 | */ | |
3694 | cv_broadcast(&arc_reclaim_waiters_cv); | |
3695 | ||
302f753f BB |
3696 | /* |
3697 | * When direct reclaim is observed it usually indicates a rapid | |
3698 | * increase in memory pressure. This occurs because the kswapd | |
3699 | * threads were unable to asynchronously keep enough free memory | |
3700 | * available. In this case set arc_no_grow to briefly pause arc | |
3701 | * growth to avoid compounding the memory pressure. | |
3702 | */ | |
7cb67b45 | 3703 | if (current_is_kswapd()) { |
302f753f | 3704 | ARCSTAT_BUMP(arcstat_memory_indirect_count); |
7cb67b45 | 3705 | } else { |
302f753f | 3706 | arc_no_grow = B_TRUE; |
11f552fa | 3707 | arc_need_free = ptob(sc->nr_to_scan); |
302f753f | 3708 | ARCSTAT_BUMP(arcstat_memory_direct_count); |
7cb67b45 BB |
3709 | } |
3710 | ||
1e3cb67b | 3711 | return (pages); |
7cb67b45 | 3712 | } |
7e7baeca | 3713 | SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func); |
7cb67b45 BB |
3714 | |
3715 | SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS); | |
3716 | #endif /* _KERNEL */ | |
3717 | ||
34dc7c2f BB |
3718 | /* |
3719 | * Adapt arc info given the number of bytes we are trying to add and | |
3720 | * the state that we are coming from. This function is only called | |
3721 | * when we are adding new content to the cache. | |
3722 | */ | |
3723 | static void | |
3724 | arc_adapt(int bytes, arc_state_t *state) | |
3725 | { | |
3726 | int mult; | |
728d6ae9 | 3727 | uint64_t arc_p_min = (arc_c >> arc_p_min_shift); |
36da08ef PS |
3728 | int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); |
3729 | int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); | |
34dc7c2f BB |
3730 | |
3731 | if (state == arc_l2c_only) | |
3732 | return; | |
3733 | ||
3734 | ASSERT(bytes > 0); | |
3735 | /* | |
3736 | * Adapt the target size of the MRU list: | |
3737 | * - if we just hit in the MRU ghost list, then increase | |
3738 | * the target size of the MRU list. | |
3739 | * - if we just hit in the MFU ghost list, then increase | |
3740 | * the target size of the MFU list by decreasing the | |
3741 | * target size of the MRU list. | |
3742 | */ | |
3743 | if (state == arc_mru_ghost) { | |
36da08ef | 3744 | mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); |
62422785 PS |
3745 | if (!zfs_arc_p_dampener_disable) |
3746 | mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ | |
34dc7c2f | 3747 | |
728d6ae9 | 3748 | arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); |
34dc7c2f | 3749 | } else if (state == arc_mfu_ghost) { |
d164b209 BB |
3750 | uint64_t delta; |
3751 | ||
36da08ef | 3752 | mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size); |
62422785 PS |
3753 | if (!zfs_arc_p_dampener_disable) |
3754 | mult = MIN(mult, 10); | |
34dc7c2f | 3755 | |
d164b209 | 3756 | delta = MIN(bytes * mult, arc_p); |
728d6ae9 | 3757 | arc_p = MAX(arc_p_min, arc_p - delta); |
34dc7c2f BB |
3758 | } |
3759 | ASSERT((int64_t)arc_p >= 0); | |
3760 | ||
ca67b33a MA |
3761 | if (arc_reclaim_needed()) { |
3762 | cv_signal(&arc_reclaim_thread_cv); | |
3763 | return; | |
3764 | } | |
3765 | ||
34dc7c2f BB |
3766 | if (arc_no_grow) |
3767 | return; | |
3768 | ||
3769 | if (arc_c >= arc_c_max) | |
3770 | return; | |
3771 | ||
3772 | /* | |
3773 | * If we're within (2 * maxblocksize) bytes of the target | |
3774 | * cache size, increment the target cache size | |
3775 | */ | |
121b3cae TC |
3776 | VERIFY3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT); |
3777 | if (arc_size >= arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { | |
34dc7c2f BB |
3778 | atomic_add_64(&arc_c, (int64_t)bytes); |
3779 | if (arc_c > arc_c_max) | |
3780 | arc_c = arc_c_max; | |
3781 | else if (state == arc_anon) | |
3782 | atomic_add_64(&arc_p, (int64_t)bytes); | |
3783 | if (arc_p > arc_c) | |
3784 | arc_p = arc_c; | |
3785 | } | |
3786 | ASSERT((int64_t)arc_p >= 0); | |
3787 | } | |
3788 | ||
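The ghost-hit adaptation above reduces to: grow arc_p on an MRU-ghost hit, shrink it on an MFU-ghost hit, scaled by the ratio of the ghost list sizes and damped at 10x. A sketch of just that step (hypothetical names; like the code above, it assumes the ghost list that was hit is non-empty):

#include <stdint.h>

static uint64_t
adapt_p(uint64_t p, uint64_t p_min, uint64_t c, uint64_t bytes,
    int mru_ghost_hit, int64_t mrug, int64_t mfug)
{
	int64_t mult;

	if (mru_ghost_hit) {
		/* hit in the MRU ghost list: grow the MRU target */
		mult = (mrug >= mfug) ? 1 : (mfug / mrug);
		if (mult > 10)
			mult = 10;	/* damp wild arc_p swings */
		p = p + bytes * mult;
		if (p > c - p_min)
			p = c - p_min;
	} else {
		/* hit in the MFU ghost list: shrink the MRU target */
		uint64_t delta;

		mult = (mfug >= mrug) ? 1 : (mrug / mfug);
		if (mult > 10)
			mult = 10;
		delta = bytes * mult;
		if (delta > p)
			delta = p;
		p = (p - delta > p_min) ? p - delta : p_min;
	}
	return (p);
}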
3789 | /* | |
ca0bf58d PS |
3790 | * Check if arc_size has grown past our upper threshold, determined by |
3791 | * zfs_arc_overflow_shift. | |
34dc7c2f | 3792 | */ |
ca0bf58d PS |
3793 | static boolean_t |
3794 | arc_is_overflowing(void) | |
34dc7c2f | 3795 | { |
ca0bf58d PS |
3796 | /* Always allow at least one block of overflow */ |
3797 | uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, | |
3798 | arc_c >> zfs_arc_overflow_shift); | |
34dc7c2f | 3799 | |
ca0bf58d | 3800 | return (arc_size >= arc_c + overflow); |
34dc7c2f BB |
3801 | } |
3802 | ||
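As a sketch, the test is one comparison against arc_c plus a floor-clamped slack; names are hypothetical and the 16M constant merely stands in for SPA_MAXBLOCKSIZE:

#include <stdint.h>

#define	MAX_BLOCK	(16ULL << 20)	/* stand-in for SPA_MAXBLOCKSIZE */

static int
is_overflowing(uint64_t size, uint64_t c, int overflow_shift)
{
	uint64_t overflow = c >> overflow_shift;

	if (overflow < MAX_BLOCK)	/* always allow one block of slack */
		overflow = MAX_BLOCK;
	return (size >= c + overflow);
}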
3803 | /* | |
ca0bf58d PS |
3804 | * The buffer, supplied as the first argument, needs a data block. If we |
3805 | * are hitting the hard limit for the cache size, we must sleep, waiting | |
3806 | * for the eviction thread to catch up. If we're past the target size | |
3807 | * but below the hard limit, we'll only signal the reclaim thread and | |
3808 | * continue on. | |
34dc7c2f BB |
3809 | */ |
3810 | static void | |
3811 | arc_get_data_buf(arc_buf_t *buf) | |
3812 | { | |
b9541d6b | 3813 | arc_state_t *state = buf->b_hdr->b_l1hdr.b_state; |
34dc7c2f | 3814 | uint64_t size = buf->b_hdr->b_size; |
b9541d6b | 3815 | arc_buf_contents_t type = arc_buf_type(buf->b_hdr); |
34dc7c2f BB |
3816 | |
3817 | arc_adapt(size, state); | |
3818 | ||
3819 | /* | |
ca0bf58d PS |
3820 | * If arc_size is currently overflowing, and has grown past our |
3821 | * upper limit, we must be adding data faster than the evict | |
3822 | * thread can evict. Thus, to ensure we don't compound the | |
3823 | * problem by adding more data and forcing arc_size to grow even | |
3824 | * further past its target size, we halt and wait for the | |
3825 | * eviction thread to catch up. | |
3826 | * | |
3827 | * It's also possible that the reclaim thread is unable to evict | |
3828 | * enough buffers to get arc_size below the overflow limit (e.g. | |
3829 | * due to buffers being un-evictable, or hash lock collisions). | |
3830 | * In this case, we want to proceed regardless if we're | |
3831 | * overflowing; thus we don't use a while loop here. | |
34dc7c2f | 3832 | */ |
ca0bf58d PS |
3833 | if (arc_is_overflowing()) { |
3834 | mutex_enter(&arc_reclaim_lock); | |
3835 | ||
3836 | /* | |
3837 | * Now that we've acquired the lock, we may no longer be | |
3838 | * over the overflow limit; let's check. | |
3839 | * | |
3840 | * We're ignoring the case of spurious wake ups. If that | |
3841 | * were to happen, it'd let this thread consume an ARC | |
3842 | * buffer before it should have (i.e. before we're under | |
3843 | * the overflow limit and were signalled by the reclaim | |
3844 | * thread). As long as that is a rare occurrence, it | |
3845 | * shouldn't cause any harm. | |
3846 | */ | |
3847 | if (arc_is_overflowing()) { | |
3848 | cv_signal(&arc_reclaim_thread_cv); | |
3849 | cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock); | |
34dc7c2f | 3850 | } |
34dc7c2f | 3851 | |
ca0bf58d | 3852 | mutex_exit(&arc_reclaim_lock); |
34dc7c2f | 3853 | } |
ab26409d | 3854 | |
da8ccd0e | 3855 | if (type == ARC_BUFC_METADATA) { |
ca0bf58d PS |
3856 | buf->b_data = zio_buf_alloc(size); |
3857 | arc_space_consume(size, ARC_SPACE_META); | |
3858 | } else { | |
3859 | ASSERT(type == ARC_BUFC_DATA); | |
3860 | buf->b_data = zio_data_buf_alloc(size); | |
3861 | arc_space_consume(size, ARC_SPACE_DATA); | |
da8ccd0e PS |
3862 | } |
3863 | ||
34dc7c2f BB |
3864 | /* |
3865 | * Update the state size. Note that ghost states have a | |
3866 | * "ghost size" and so don't need to be updated. | |
3867 | */ | |
b9541d6b | 3868 | if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) { |
34dc7c2f | 3869 | arc_buf_hdr_t *hdr = buf->b_hdr; |
36da08ef | 3870 | arc_state_t *state = hdr->b_l1hdr.b_state; |
34dc7c2f | 3871 | |
36da08ef | 3872 | (void) refcount_add_many(&state->arcs_size, size, buf); |
ca0bf58d PS |
3873 | |
3874 | /* | |
3875 | * If this is reached via arc_read, the link is | |
3876 | * protected by the hash lock. If reached via | |
3877 | * arc_buf_alloc, the header should not be accessed by | |
3878 | * any other thread. And, if reached via arc_read_done, | |
3879 | * the hash lock will protect it if it's found in the | |
3880 | * hash table; otherwise no other thread should be | |
3881 | * trying to [add|remove]_reference it. | |
3882 | */ | |
3883 | if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { | |
b9541d6b CW |
3884 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
3885 | atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type], | |
3886 | size); | |
34dc7c2f BB |
3887 | } |
3888 | /* | |
3889 | * If we are growing the cache, and we are adding anonymous | |
3890 | * data, and we have outgrown arc_p, update arc_p | |
3891 | */ | |
ca0bf58d | 3892 | if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon && |
36da08ef PS |
3893 | (refcount_count(&arc_anon->arcs_size) + |
3894 | refcount_count(&arc_mru->arcs_size) > arc_p)) | |
34dc7c2f BB |
3895 | arc_p = MIN(arc_c, arc_p + size); |
3896 | } | |
3897 | } | |
3898 | ||
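The blocking protocol above (test, take the lock, re-test, then a single wait rather than a while loop) maps directly onto condition variables; a pthreads sketch under the same caveats, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t reclaim_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t reclaim_thread_cv = PTHREAD_COND_INITIALIZER;
static pthread_cond_t waiters_cv = PTHREAD_COND_INITIALIZER;
static int overflowing;		/* stand-in for arc_is_overflowing() */

static void
wait_if_overflowing(void)
{
	if (!overflowing)
		return;

	pthread_mutex_lock(&reclaim_lock);
	if (overflowing) {	/* re-check now that we hold the lock */
		/* wake the reclaim thread, then wait once (not a loop) */
		pthread_cond_signal(&reclaim_thread_cv);
		pthread_cond_wait(&waiters_cv, &reclaim_lock);
	}
	pthread_mutex_unlock(&reclaim_lock);
}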
3899 | /* | |
3900 | * This routine is called whenever a buffer is accessed. | |
3901 | * NOTE: the hash lock is dropped in this function. | |
3902 | */ | |
3903 | static void | |
2a432414 | 3904 | arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) |
34dc7c2f | 3905 | { |
428870ff BB |
3906 | clock_t now; |
3907 | ||
34dc7c2f | 3908 | ASSERT(MUTEX_HELD(hash_lock)); |
b9541d6b | 3909 | ASSERT(HDR_HAS_L1HDR(hdr)); |
34dc7c2f | 3910 | |
b9541d6b | 3911 | if (hdr->b_l1hdr.b_state == arc_anon) { |
34dc7c2f BB |
3912 | /* |
3913 | * This buffer is not in the cache, and does not | |
3914 | * appear in our "ghost" list. Add the new buffer | |
3915 | * to the MRU state. | |
3916 | */ | |
3917 | ||
b9541d6b CW |
3918 | ASSERT0(hdr->b_l1hdr.b_arc_access); |
3919 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); | |
2a432414 GW |
3920 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); |
3921 | arc_change_state(arc_mru, hdr, hash_lock); | |
34dc7c2f | 3922 | |
b9541d6b | 3923 | } else if (hdr->b_l1hdr.b_state == arc_mru) { |
428870ff BB |
3924 | now = ddi_get_lbolt(); |
3925 | ||
34dc7c2f BB |
3926 | /* |
3927 | * If this buffer is here because of a prefetch, then either: | |
3928 | * - clear the flag if this is a "referencing" read | |
3929 | * (any subsequent access will bump this into the MFU state). | |
3930 | * or | |
3931 | * - move the buffer to the head of the list if this is | |
3932 | * another prefetch (to make it less likely to be evicted). | |
3933 | */ | |
b9541d6b CW |
3934 | if (HDR_PREFETCH(hdr)) { |
3935 | if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { | |
ca0bf58d PS |
3936 | /* link protected by hash lock */ |
3937 | ASSERT(multilist_link_active( | |
b9541d6b | 3938 | &hdr->b_l1hdr.b_arc_node)); |
34dc7c2f | 3939 | } else { |
2a432414 | 3940 | hdr->b_flags &= ~ARC_FLAG_PREFETCH; |
b9541d6b | 3941 | atomic_inc_32(&hdr->b_l1hdr.b_mru_hits); |
34dc7c2f BB |
3942 | ARCSTAT_BUMP(arcstat_mru_hits); |
3943 | } | |
b9541d6b | 3944 | hdr->b_l1hdr.b_arc_access = now; |
34dc7c2f BB |
3945 | return; |
3946 | } | |
3947 | ||
3948 | /* | |
3949 | * This buffer has been "accessed" only once so far, | |
3950 | * but it is still in the cache. Move it to the MFU | |
3951 | * state. | |
3952 | */ | |
b9541d6b CW |
3953 | if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access + |
3954 | ARC_MINTIME)) { | |
34dc7c2f BB |
3955 | /* |
3956 | * More than 125ms have passed since we | |
3957 | * instantiated this buffer. Move it to the | |
3958 | * most frequently used state. | |
3959 | */ | |
b9541d6b | 3960 | hdr->b_l1hdr.b_arc_access = now; |
2a432414 GW |
3961 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
3962 | arc_change_state(arc_mfu, hdr, hash_lock); | |
34dc7c2f | 3963 | } |
b9541d6b | 3964 | atomic_inc_32(&hdr->b_l1hdr.b_mru_hits); |
34dc7c2f | 3965 | ARCSTAT_BUMP(arcstat_mru_hits); |
b9541d6b | 3966 | } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { |
34dc7c2f BB |
3967 | arc_state_t *new_state; |
3968 | /* | |
3969 | * This buffer has been "accessed" recently, but | |
3970 | * was evicted from the cache. Move it to the | |
3971 | * MFU state. | |
3972 | */ | |
3973 | ||
b9541d6b | 3974 | if (HDR_PREFETCH(hdr)) { |
34dc7c2f | 3975 | new_state = arc_mru; |
b9541d6b | 3976 | if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) |
2a432414 GW |
3977 | hdr->b_flags &= ~ARC_FLAG_PREFETCH; |
3978 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); | |
34dc7c2f BB |
3979 | } else { |
3980 | new_state = arc_mfu; | |
2a432414 | 3981 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
34dc7c2f BB |
3982 | } |
3983 | ||
b9541d6b | 3984 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 | 3985 | arc_change_state(new_state, hdr, hash_lock); |
34dc7c2f | 3986 | |
b9541d6b | 3987 | atomic_inc_32(&hdr->b_l1hdr.b_mru_ghost_hits); |
34dc7c2f | 3988 | ARCSTAT_BUMP(arcstat_mru_ghost_hits); |
b9541d6b | 3989 | } else if (hdr->b_l1hdr.b_state == arc_mfu) { |
34dc7c2f BB |
3990 | /* |
3991 | * This buffer has been accessed more than once and is | |
3992 | * still in the cache. Keep it in the MFU state. | |
3993 | * | |
3994 | * NOTE: an add_reference() that occurred when we did | |
3995 | * the arc_read() will have kicked this off the list. | |
3996 | * If it was a prefetch, we will explicitly move it to | |
3997 | * the head of the list now. | |
3998 | */ | |
b9541d6b CW |
3999 | if ((HDR_PREFETCH(hdr)) != 0) { |
4000 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); | |
ca0bf58d PS |
4001 | /* link protected by hash_lock */ |
4002 | ASSERT(multilist_link_active(&hdr->b_l1hdr.b_arc_node)); | |
34dc7c2f | 4003 | } |
b9541d6b | 4004 | atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits); |
34dc7c2f | 4005 | ARCSTAT_BUMP(arcstat_mfu_hits); |
b9541d6b CW |
4006 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
4007 | } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { | |
34dc7c2f BB |
4008 | arc_state_t *new_state = arc_mfu; |
4009 | /* | |
4010 | * This buffer has been accessed more than once but has | |
4011 | * been evicted from the cache. Move it back to the | |
4012 | * MFU state. | |
4013 | */ | |
4014 | ||
b9541d6b | 4015 | if (HDR_PREFETCH(hdr)) { |
34dc7c2f BB |
4016 | /* |
4017 | * This is a prefetch access... | |
4018 | * move this block back to the MRU state. | |
4019 | */ | |
b9541d6b | 4020 | ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); |
34dc7c2f BB |
4021 | new_state = arc_mru; |
4022 | } | |
4023 | ||
b9541d6b | 4024 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 GW |
4025 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
4026 | arc_change_state(new_state, hdr, hash_lock); | |
34dc7c2f | 4027 | |
b9541d6b | 4028 | atomic_inc_32(&hdr->b_l1hdr.b_mfu_ghost_hits); |
34dc7c2f | 4029 | ARCSTAT_BUMP(arcstat_mfu_ghost_hits); |
b9541d6b | 4030 | } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { |
34dc7c2f BB |
4031 | /* |
4032 | * This buffer is on the 2nd Level ARC. | |
4033 | */ | |
4034 | ||
b9541d6b | 4035 | hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); |
2a432414 GW |
4036 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); |
4037 | arc_change_state(arc_mfu, hdr, hash_lock); | |
34dc7c2f | 4038 | } else { |
b9541d6b CW |
4039 | cmn_err(CE_PANIC, "invalid arc state 0x%p", |
4040 | hdr->b_l1hdr.b_state); | |
34dc7c2f BB |
4041 | } |
4042 | } | |
4043 | ||
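/*
 * Editorial summary (not part of the original source): the transitions
 * implemented by arc_access() above are, roughly:
 *
 *   mru        -> mfu   second hit, at least ARC_MINTIME after the first
 *   mru_ghost  -> mfu   hit on a buffer recently evicted from the MRU list
 *                       (-> mru instead if it was a prefetch)
 *   mfu        -> mfu   stays put; only the hit counters are bumped
 *   mfu_ghost  -> mfu   hit on a buffer evicted from the MFU list
 *                       (-> mru instead if it was a prefetch)
 *   l2c_only   -> mfu   buffer being read back in from the L2ARC
 *
 * Any other state indicates a corrupted header and panics.
 */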
4044 | /* a generic arc_done_func_t which you can use */ | |
4045 | /* ARGSUSED */ | |
4046 | void | |
4047 | arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
4048 | { | |
428870ff BB |
4049 | if (zio == NULL || zio->io_error == 0) |
4050 | bcopy(buf->b_data, arg, buf->b_hdr->b_size); | |
13fe0198 | 4051 | VERIFY(arc_buf_remove_ref(buf, arg)); |
34dc7c2f BB |
4052 | } |
4053 | ||
4054 | /* a generic arc_done_func_t */ | |
4055 | void | |
4056 | arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
4057 | { | |
4058 | arc_buf_t **bufp = arg; | |
4059 | if (zio && zio->io_error) { | |
13fe0198 | 4060 | VERIFY(arc_buf_remove_ref(buf, arg)); |
34dc7c2f BB |
4061 | *bufp = NULL; |
4062 | } else { | |
4063 | *bufp = buf; | |
428870ff | 4064 | ASSERT(buf->b_data); |
34dc7c2f BB |
4065 | } |
4066 | } | |
4067 | ||
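/*
 * Editorial sketch (not part of the original source): one way a caller
 * might pair arc_getbuf_func() with a synchronous arc_read().  The helper
 * name and its EIO fallback are hypothetical; arc_read(), arc_getbuf_func(),
 * ARC_FLAG_WAIT, and SET_ERROR() are from this file.
 */
static int
example_read_block_sync(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb, arc_buf_t **bufp)
{
	arc_flags_t aflags = ARC_FLAG_WAIT;
	int err;

	*bufp = NULL;
	err = arc_read(NULL, spa, bp, arc_getbuf_func, bufp,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);

	/* arc_getbuf_func() NULLs *bufp when the zio reported an error */
	if (err == 0 && *bufp == NULL)
		err = SET_ERROR(EIO);
	return (err);
}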
4068 | static void | |
4069 | arc_read_done(zio_t *zio) | |
4070 | { | |
9b67f605 | 4071 | arc_buf_hdr_t *hdr; |
34dc7c2f BB |
4072 | arc_buf_t *buf; |
4073 | arc_buf_t *abuf; /* buffer we're assigning to callback */ | |
9b67f605 | 4074 | kmutex_t *hash_lock = NULL; |
34dc7c2f BB |
4075 | arc_callback_t *callback_list, *acb; |
4076 | int freeable = FALSE; | |
4077 | ||
4078 | buf = zio->io_private; | |
4079 | hdr = buf->b_hdr; | |
4080 | ||
4081 | /* | |
4082 | * The hdr was inserted into hash-table and removed from lists | |
4083 | * prior to starting I/O. We should find this header, since | |
4084 | * it's in the hash table, and it should be legit since it's | |
4085 | * not possible to evict it during the I/O. The only possible | |
4086 | * reason for it not to be found is that it was freed during the
4087 | * read. | |
4088 | */ | |
9b67f605 MA |
4089 | if (HDR_IN_HASH_TABLE(hdr)) { |
4090 | arc_buf_hdr_t *found; | |
4091 | ||
4092 | ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); | |
4093 | ASSERT3U(hdr->b_dva.dva_word[0], ==, | |
4094 | BP_IDENTITY(zio->io_bp)->dva_word[0]); | |
4095 | ASSERT3U(hdr->b_dva.dva_word[1], ==, | |
4096 | BP_IDENTITY(zio->io_bp)->dva_word[1]); | |
4097 | ||
4098 | found = buf_hash_find(hdr->b_spa, zio->io_bp, | |
4099 | &hash_lock); | |
4100 | ||
4101 | ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && | |
4102 | hash_lock == NULL) || | |
4103 | (found == hdr && | |
4104 | DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || | |
4105 | (found == hdr && HDR_L2_READING(hdr))); | |
4106 | } | |
34dc7c2f | 4107 | |
2a432414 | 4108 | hdr->b_flags &= ~ARC_FLAG_L2_EVICTED; |
b9541d6b | 4109 | if (l2arc_noprefetch && HDR_PREFETCH(hdr)) |
2a432414 | 4110 | hdr->b_flags &= ~ARC_FLAG_L2CACHE; |
34dc7c2f BB |
4111 | |
4112 | /* byteswap if necessary */ | |
b9541d6b | 4113 | callback_list = hdr->b_l1hdr.b_acb; |
34dc7c2f | 4114 | ASSERT(callback_list != NULL); |
428870ff | 4115 | if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { |
9ae529ec CS |
4116 | dmu_object_byteswap_t bswap = |
4117 | DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); | |
b01615d5 RY |
4118 | if (BP_GET_LEVEL(zio->io_bp) > 0) |
4119 | byteswap_uint64_array(buf->b_data, hdr->b_size); | |
4120 | else | |
4121 | dmu_ot_byteswap[bswap].ob_func(buf->b_data, hdr->b_size); | |
b128c09f | 4122 | } |
34dc7c2f BB |
4123 | |
4124 | arc_cksum_compute(buf, B_FALSE); | |
498877ba | 4125 | arc_buf_watch(buf); |
34dc7c2f | 4126 | |
b9541d6b CW |
4127 | if (hash_lock && zio->io_error == 0 && |
4128 | hdr->b_l1hdr.b_state == arc_anon) { | |
428870ff BB |
4129 | /* |
4130 | * Only call arc_access on anonymous buffers. This is because | |
4131 | * if we've issued an I/O for an evicted buffer, we've already | |
4132 | * called arc_access (to prevent any simultaneous readers from | |
4133 | * getting confused). | |
4134 | */ | |
4135 | arc_access(hdr, hash_lock); | |
4136 | } | |
4137 | ||
34dc7c2f BB |
4138 | /* create copies of the data buffer for the callers */ |
4139 | abuf = buf; | |
4140 | for (acb = callback_list; acb; acb = acb->acb_next) { | |
4141 | if (acb->acb_done) { | |
1eb5bfa3 GW |
4142 | if (abuf == NULL) { |
4143 | ARCSTAT_BUMP(arcstat_duplicate_reads); | |
34dc7c2f | 4144 | abuf = arc_buf_clone(buf); |
1eb5bfa3 | 4145 | } |
34dc7c2f BB |
4146 | acb->acb_buf = abuf; |
4147 | abuf = NULL; | |
4148 | } | |
4149 | } | |
b9541d6b | 4150 | hdr->b_l1hdr.b_acb = NULL; |
2a432414 | 4151 | hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; |
34dc7c2f | 4152 | ASSERT(!HDR_BUF_AVAILABLE(hdr)); |
428870ff BB |
4153 | if (abuf == buf) { |
4154 | ASSERT(buf->b_efunc == NULL); | |
b9541d6b | 4155 | ASSERT(hdr->b_l1hdr.b_datacnt == 1); |
2a432414 | 4156 | hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; |
428870ff | 4157 | } |
34dc7c2f | 4158 | |
b9541d6b CW |
4159 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || |
4160 | callback_list != NULL); | |
34dc7c2f BB |
4161 | |
4162 | if (zio->io_error != 0) { | |
2a432414 | 4163 | hdr->b_flags |= ARC_FLAG_IO_ERROR; |
b9541d6b | 4164 | if (hdr->b_l1hdr.b_state != arc_anon) |
34dc7c2f BB |
4165 | arc_change_state(arc_anon, hdr, hash_lock); |
4166 | if (HDR_IN_HASH_TABLE(hdr)) | |
4167 | buf_hash_remove(hdr); | |
b9541d6b | 4168 | freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); |
34dc7c2f BB |
4169 | } |
4170 | ||
4171 | /* | |
4172 | * Broadcast before we drop the hash_lock to avoid the possibility | |
4173 | * that the hdr (and hence the cv) might be freed before we get to | |
4174 | * the cv_broadcast(). | |
4175 | */ | |
b9541d6b | 4176 | cv_broadcast(&hdr->b_l1hdr.b_cv); |
34dc7c2f | 4177 | |
b9541d6b | 4178 | if (hash_lock != NULL) { |
34dc7c2f BB |
4179 | mutex_exit(hash_lock); |
4180 | } else { | |
4181 | /* | |
4182 | * This block was freed while we waited for the read to | |
4183 | * complete. It has been removed from the hash table and | |
4184 | * moved to the anonymous state (so that it won't show up | |
4185 | * in the cache). | |
4186 | */ | |
b9541d6b CW |
4187 | ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); |
4188 | freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); | |
34dc7c2f BB |
4189 | } |
4190 | ||
4191 | /* execute each callback and free its structure */ | |
4192 | while ((acb = callback_list) != NULL) { | |
4193 | if (acb->acb_done) | |
4194 | acb->acb_done(zio, acb->acb_buf, acb->acb_private); | |
4195 | ||
4196 | if (acb->acb_zio_dummy != NULL) { | |
4197 | acb->acb_zio_dummy->io_error = zio->io_error; | |
4198 | zio_nowait(acb->acb_zio_dummy); | |
4199 | } | |
4200 | ||
4201 | callback_list = acb->acb_next; | |
4202 | kmem_free(acb, sizeof (arc_callback_t)); | |
4203 | } | |
4204 | ||
4205 | if (freeable) | |
4206 | arc_hdr_destroy(hdr); | |
4207 | } | |
4208 | ||
4209 | /* | |
5c839890 | 4210 | * "Read" the block at the specified DVA (in bp) via the |
34dc7c2f BB |
4211 | * cache. If the block is found in the cache, invoke the provided |
4212 | * callback immediately and return. Note that the `zio' parameter | |
4213 | * in the callback will be NULL in this case, since no IO was | |
4214 | * required. If the block is not in the cache, pass the read request
4215 | * on to the spa with a substitute callback function, so that the | |
4216 | * requested block will be added to the cache. | |
4217 | * | |
4218 | * If a read request arrives for a block that has a read in-progress, | |
4219 | * either wait for the in-progress read to complete (and return the | |
4220 | * results); or, if this is a read with a "done" func, add a record | |
4221 | * to the read to invoke the "done" func when the read completes, | |
4222 | * and return; or just return. | |
4223 | * | |
4224 | * arc_read_done() will invoke all the requested "done" functions | |
4225 | * for readers of this block. | |
4226 | */ | |
4227 | int | |
294f6806 | 4228 | arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, |
2a432414 GW |
4229 | void *private, zio_priority_t priority, int zio_flags, |
4230 | arc_flags_t *arc_flags, const zbookmark_phys_t *zb) | |
34dc7c2f | 4231 | { |
9b67f605 | 4232 | arc_buf_hdr_t *hdr = NULL; |
d4ed6673 | 4233 | arc_buf_t *buf = NULL; |
9b67f605 | 4234 | kmutex_t *hash_lock = NULL; |
34dc7c2f | 4235 | zio_t *rzio; |
3541dc6d | 4236 | uint64_t guid = spa_load_guid(spa); |
1421c891 | 4237 | int rc = 0; |
34dc7c2f | 4238 | |
9b67f605 MA |
4239 | ASSERT(!BP_IS_EMBEDDED(bp) || |
4240 | BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); | |
4241 | ||
34dc7c2f | 4242 | top: |
9b67f605 MA |
4243 | if (!BP_IS_EMBEDDED(bp)) { |
4244 | /* | |
4245 | * Embedded BP's have no DVA and require no I/O to "read";
4246 | * they get an anonymous arc buf below, so skip the hash lookup.
4247 | */ | |
4248 | hdr = buf_hash_find(guid, bp, &hash_lock); | |
4249 | } | |
4250 | ||
b9541d6b | 4251 | if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) { |
34dc7c2f | 4252 | |
2a432414 | 4253 | *arc_flags |= ARC_FLAG_CACHED; |
34dc7c2f BB |
4254 | |
4255 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
4256 | ||
2a432414 | 4257 | if (*arc_flags & ARC_FLAG_WAIT) { |
b9541d6b | 4258 | cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); |
34dc7c2f BB |
4259 | mutex_exit(hash_lock); |
4260 | goto top; | |
4261 | } | |
2a432414 | 4262 | ASSERT(*arc_flags & ARC_FLAG_NOWAIT); |
34dc7c2f BB |
4263 | |
4264 | if (done) { | |
4265 | arc_callback_t *acb = NULL; | |
4266 | ||
4267 | acb = kmem_zalloc(sizeof (arc_callback_t), | |
79c76d5b | 4268 | KM_SLEEP); |
34dc7c2f BB |
4269 | acb->acb_done = done; |
4270 | acb->acb_private = private; | |
34dc7c2f BB |
4271 | if (pio != NULL) |
4272 | acb->acb_zio_dummy = zio_null(pio, | |
d164b209 | 4273 | spa, NULL, NULL, NULL, zio_flags); |
34dc7c2f BB |
4274 | |
4275 | ASSERT(acb->acb_done != NULL); | |
b9541d6b CW |
4276 | acb->acb_next = hdr->b_l1hdr.b_acb; |
4277 | hdr->b_l1hdr.b_acb = acb; | |
34dc7c2f BB |
4278 | add_reference(hdr, hash_lock, private); |
4279 | mutex_exit(hash_lock); | |
1421c891 | 4280 | goto out; |
34dc7c2f BB |
4281 | } |
4282 | mutex_exit(hash_lock); | |
1421c891 | 4283 | goto out; |
34dc7c2f BB |
4284 | } |
4285 | ||
b9541d6b CW |
4286 | ASSERT(hdr->b_l1hdr.b_state == arc_mru || |
4287 | hdr->b_l1hdr.b_state == arc_mfu); | |
34dc7c2f BB |
4288 | |
4289 | if (done) { | |
4290 | add_reference(hdr, hash_lock, private); | |
4291 | /* | |
4292 | * If this block is already in use, create a new | |
4293 | * copy of the data so that we will be guaranteed | |
4294 | * that arc_release() will always succeed. | |
4295 | */ | |
b9541d6b | 4296 | buf = hdr->b_l1hdr.b_buf; |
34dc7c2f BB |
4297 | ASSERT(buf); |
4298 | ASSERT(buf->b_data); | |
4299 | if (HDR_BUF_AVAILABLE(hdr)) { | |
4300 | ASSERT(buf->b_efunc == NULL); | |
2a432414 | 4301 | hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; |
34dc7c2f BB |
4302 | } else { |
4303 | buf = arc_buf_clone(buf); | |
4304 | } | |
428870ff | 4305 | |
2a432414 | 4306 | } else if (*arc_flags & ARC_FLAG_PREFETCH && |
b9541d6b | 4307 | refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { |
2a432414 | 4308 | hdr->b_flags |= ARC_FLAG_PREFETCH; |
34dc7c2f BB |
4309 | } |
4310 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); | |
4311 | arc_access(hdr, hash_lock); | |
2a432414 GW |
4312 | if (*arc_flags & ARC_FLAG_L2CACHE) |
4313 | hdr->b_flags |= ARC_FLAG_L2CACHE; | |
4314 | if (*arc_flags & ARC_FLAG_L2COMPRESS) | |
4315 | hdr->b_flags |= ARC_FLAG_L2COMPRESS; | |
34dc7c2f BB |
4316 | mutex_exit(hash_lock); |
4317 | ARCSTAT_BUMP(arcstat_hits); | |
b9541d6b CW |
4318 | ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), |
4319 | demand, prefetch, !HDR_ISTYPE_METADATA(hdr), | |
34dc7c2f BB |
4320 | data, metadata, hits); |
4321 | ||
4322 | if (done) | |
4323 | done(NULL, buf, private); | |
4324 | } else { | |
4325 | uint64_t size = BP_GET_LSIZE(bp); | |
9b67f605 | 4326 | arc_callback_t *acb; |
b128c09f | 4327 | vdev_t *vd = NULL; |
a117a6d6 | 4328 | uint64_t addr = 0; |
d164b209 | 4329 | boolean_t devw = B_FALSE; |
0ed212dc | 4330 | enum zio_compress b_compress = ZIO_COMPRESS_OFF; |
b9541d6b | 4331 | int32_t b_asize = 0; |
34dc7c2f | 4332 | |
5f6d0b6f BB |
4333 | /* |
4334 | * Gracefully handle a damaged logical block size as a | |
4335 | * checksum error by passing a dummy zio to the done callback. | |
4336 | */ | |
f1512ee6 | 4337 | if (size > spa_maxblocksize(spa)) { |
5f6d0b6f BB |
4338 | if (done) { |
4339 | rzio = zio_null(pio, spa, NULL, | |
4340 | NULL, NULL, zio_flags); | |
4341 | rzio->io_error = ECKSUM; | |
4342 | done(rzio, buf, private); | |
4343 | zio_nowait(rzio); | |
4344 | } | |
4345 | rc = ECKSUM; | |
4346 | goto out; | |
4347 | } | |
4348 | ||
34dc7c2f BB |
4349 | if (hdr == NULL) { |
4350 | /* this block is not in the cache */ | |
9b67f605 | 4351 | arc_buf_hdr_t *exists = NULL; |
34dc7c2f BB |
4352 | arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); |
4353 | buf = arc_buf_alloc(spa, size, private, type); | |
4354 | hdr = buf->b_hdr; | |
9b67f605 MA |
4355 | if (!BP_IS_EMBEDDED(bp)) { |
4356 | hdr->b_dva = *BP_IDENTITY(bp); | |
4357 | hdr->b_birth = BP_PHYSICAL_BIRTH(bp); | |
9b67f605 MA |
4358 | exists = buf_hash_insert(hdr, &hash_lock); |
4359 | } | |
4360 | if (exists != NULL) { | |
34dc7c2f BB |
4361 | /* somebody beat us to the hash insert */ |
4362 | mutex_exit(hash_lock); | |
428870ff | 4363 | buf_discard_identity(hdr); |
34dc7c2f BB |
4364 | (void) arc_buf_remove_ref(buf, private); |
4365 | goto top; /* restart the IO request */ | |
4366 | } | |
2a432414 | 4367 | |
34dc7c2f | 4368 | /* if this is a prefetch, we don't have a reference */ |
2a432414 | 4369 | if (*arc_flags & ARC_FLAG_PREFETCH) { |
34dc7c2f BB |
4370 | (void) remove_reference(hdr, hash_lock, |
4371 | private); | |
2a432414 | 4372 | hdr->b_flags |= ARC_FLAG_PREFETCH; |
34dc7c2f | 4373 | } |
2a432414 GW |
4374 | if (*arc_flags & ARC_FLAG_L2CACHE) |
4375 | hdr->b_flags |= ARC_FLAG_L2CACHE; | |
4376 | if (*arc_flags & ARC_FLAG_L2COMPRESS) | |
4377 | hdr->b_flags |= ARC_FLAG_L2COMPRESS; | |
34dc7c2f | 4378 | if (BP_GET_LEVEL(bp) > 0) |
2a432414 | 4379 | hdr->b_flags |= ARC_FLAG_INDIRECT; |
34dc7c2f | 4380 | } else { |
b9541d6b CW |
4381 | /* |
4382 | * This block is in the ghost cache. If it was L2-only | |
4383 | * (and thus didn't have an L1 hdr), we realloc the | |
4384 | * header to add an L1 hdr. | |
4385 | */ | |
4386 | if (!HDR_HAS_L1HDR(hdr)) { | |
4387 | hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, | |
4388 | hdr_full_cache); | |
4389 | } | |
4390 | ||
4391 | ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state)); | |
34dc7c2f | 4392 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); |
b9541d6b | 4393 | ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
ca0bf58d | 4394 | ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); |
34dc7c2f BB |
4395 | |
4396 | /* if this is a prefetch, we don't have a reference */ | |
2a432414 GW |
4397 | if (*arc_flags & ARC_FLAG_PREFETCH) |
4398 | hdr->b_flags |= ARC_FLAG_PREFETCH; | |
34dc7c2f BB |
4399 | else |
4400 | add_reference(hdr, hash_lock, private); | |
2a432414 GW |
4401 | if (*arc_flags & ARC_FLAG_L2CACHE) |
4402 | hdr->b_flags |= ARC_FLAG_L2CACHE; | |
4403 | if (*arc_flags & ARC_FLAG_L2COMPRESS) | |
4404 | hdr->b_flags |= ARC_FLAG_L2COMPRESS; | |
34dc7c2f BB |
4405 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); |
4406 | buf->b_hdr = hdr; | |
4407 | buf->b_data = NULL; | |
4408 | buf->b_efunc = NULL; | |
4409 | buf->b_private = NULL; | |
4410 | buf->b_next = NULL; | |
b9541d6b CW |
4411 | hdr->b_l1hdr.b_buf = buf; |
4412 | ASSERT0(hdr->b_l1hdr.b_datacnt); | |
4413 | hdr->b_l1hdr.b_datacnt = 1; | |
428870ff BB |
4414 | arc_get_data_buf(buf); |
4415 | arc_access(hdr, hash_lock); | |
34dc7c2f BB |
4416 | } |
4417 | ||
b9541d6b | 4418 | ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); |
428870ff | 4419 | |
79c76d5b | 4420 | acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); |
34dc7c2f BB |
4421 | acb->acb_done = done; |
4422 | acb->acb_private = private; | |
34dc7c2f | 4423 | |
b9541d6b CW |
4424 | ASSERT(hdr->b_l1hdr.b_acb == NULL); |
4425 | hdr->b_l1hdr.b_acb = acb; | |
2a432414 | 4426 | hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; |
34dc7c2f | 4427 | |
b9541d6b CW |
4428 | if (HDR_HAS_L2HDR(hdr) && |
4429 | (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { | |
4430 | devw = hdr->b_l2hdr.b_dev->l2ad_writing; | |
4431 | addr = hdr->b_l2hdr.b_daddr; | |
4432 | b_compress = HDR_GET_COMPRESS(hdr); | |
4433 | b_asize = hdr->b_l2hdr.b_asize; | |
b128c09f BB |
4434 | /* |
4435 | * Lock out device removal. | |
4436 | */ | |
4437 | if (vdev_is_dead(vd) || | |
4438 | !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) | |
4439 | vd = NULL; | |
4440 | } | |
4441 | ||
9b67f605 MA |
4442 | if (hash_lock != NULL) |
4443 | mutex_exit(hash_lock); | |
b128c09f | 4444 | |
e49f1e20 WA |
4445 | /* |
4446 | * At this point, we have a level 1 cache miss. Try again in | |
4447 | * L2ARC if possible. | |
4448 | */ | |
34dc7c2f | 4449 | ASSERT3U(hdr->b_size, ==, size); |
428870ff | 4450 | DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, |
5dbd68a3 | 4451 | uint64_t, size, zbookmark_phys_t *, zb); |
34dc7c2f | 4452 | ARCSTAT_BUMP(arcstat_misses); |
b9541d6b CW |
4453 | ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), |
4454 | demand, prefetch, !HDR_ISTYPE_METADATA(hdr), | |
34dc7c2f BB |
4455 | data, metadata, misses); |
4456 | ||
d164b209 | 4457 | if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { |
34dc7c2f BB |
4458 | /* |
4459 | * Read from the L2ARC if the following are true: | |
b128c09f BB |
4460 | * 1. The L2ARC vdev was previously cached. |
4461 | * 2. This buffer still has L2ARC metadata. | |
4462 | * 3. This buffer isn't currently writing to the L2ARC. | |
4463 | * 4. The L2ARC entry wasn't evicted, which may | |
4464 | * also have invalidated the vdev. | |
d164b209 | 4465 | * 5. This isn't a prefetch with l2arc_noprefetch enabled.
34dc7c2f | 4466 | */ |
b9541d6b | 4467 | if (HDR_HAS_L2HDR(hdr) && |
d164b209 BB |
4468 | !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && |
4469 | !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { | |
34dc7c2f BB |
4470 | l2arc_read_callback_t *cb; |
4471 | ||
4472 | DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); | |
4473 | ARCSTAT_BUMP(arcstat_l2_hits); | |
b9541d6b | 4474 | atomic_inc_32(&hdr->b_l2hdr.b_hits); |
34dc7c2f | 4475 | |
34dc7c2f | 4476 | cb = kmem_zalloc(sizeof (l2arc_read_callback_t), |
79c76d5b | 4477 | KM_SLEEP); |
34dc7c2f BB |
4478 | cb->l2rcb_buf = buf; |
4479 | cb->l2rcb_spa = spa; | |
4480 | cb->l2rcb_bp = *bp; | |
4481 | cb->l2rcb_zb = *zb; | |
b128c09f | 4482 | cb->l2rcb_flags = zio_flags; |
0ed212dc | 4483 | cb->l2rcb_compress = b_compress; |
34dc7c2f | 4484 | |
a117a6d6 GW |
4485 | ASSERT(addr >= VDEV_LABEL_START_SIZE && |
4486 | addr + size < vd->vdev_psize - | |
4487 | VDEV_LABEL_END_SIZE); | |
4488 | ||
34dc7c2f | 4489 | /* |
b128c09f BB |
4490 | * l2arc read. The SCL_L2ARC lock will be |
4491 | * released by l2arc_read_done(). | |
3a17a7a9 SK |
4492 | * Issue a null zio if the underlying buffer |
4493 | * was squashed to zero size by compression. | |
34dc7c2f | 4494 | */ |
0ed212dc | 4495 | if (b_compress == ZIO_COMPRESS_EMPTY) { |
3a17a7a9 SK |
4496 | rzio = zio_null(pio, spa, vd, |
4497 | l2arc_read_done, cb, | |
4498 | zio_flags | ZIO_FLAG_DONT_CACHE | | |
4499 | ZIO_FLAG_CANFAIL | | |
4500 | ZIO_FLAG_DONT_PROPAGATE | | |
4501 | ZIO_FLAG_DONT_RETRY); | |
4502 | } else { | |
4503 | rzio = zio_read_phys(pio, vd, addr, | |
0ed212dc BP |
4504 | b_asize, buf->b_data, |
4505 | ZIO_CHECKSUM_OFF, | |
3a17a7a9 SK |
4506 | l2arc_read_done, cb, priority, |
4507 | zio_flags | ZIO_FLAG_DONT_CACHE | | |
4508 | ZIO_FLAG_CANFAIL | | |
4509 | ZIO_FLAG_DONT_PROPAGATE | | |
4510 | ZIO_FLAG_DONT_RETRY, B_FALSE); | |
4511 | } | |
34dc7c2f BB |
4512 | DTRACE_PROBE2(l2arc__read, vdev_t *, vd, |
4513 | zio_t *, rzio); | |
0ed212dc | 4514 | ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize); |
34dc7c2f | 4515 | |
2a432414 | 4516 | if (*arc_flags & ARC_FLAG_NOWAIT) { |
b128c09f | 4517 | zio_nowait(rzio); |
1421c891 | 4518 | goto out; |
b128c09f | 4519 | } |
34dc7c2f | 4520 | |
2a432414 | 4521 | ASSERT(*arc_flags & ARC_FLAG_WAIT); |
b128c09f | 4522 | if (zio_wait(rzio) == 0) |
1421c891 | 4523 | goto out; |
b128c09f BB |
4524 | |
4525 | /* l2arc read error; goto zio_read() */ | |
34dc7c2f BB |
4526 | } else { |
4527 | DTRACE_PROBE1(l2arc__miss, | |
4528 | arc_buf_hdr_t *, hdr); | |
4529 | ARCSTAT_BUMP(arcstat_l2_misses); | |
4530 | if (HDR_L2_WRITING(hdr)) | |
4531 | ARCSTAT_BUMP(arcstat_l2_rw_clash); | |
b128c09f | 4532 | spa_config_exit(spa, SCL_L2ARC, vd); |
34dc7c2f | 4533 | } |
d164b209 BB |
4534 | } else { |
4535 | if (vd != NULL) | |
4536 | spa_config_exit(spa, SCL_L2ARC, vd); | |
4537 | if (l2arc_ndev != 0) { | |
4538 | DTRACE_PROBE1(l2arc__miss, | |
4539 | arc_buf_hdr_t *, hdr); | |
4540 | ARCSTAT_BUMP(arcstat_l2_misses); | |
4541 | } | |
34dc7c2f | 4542 | } |
34dc7c2f BB |
4543 | |
4544 | rzio = zio_read(pio, spa, bp, buf->b_data, size, | |
b128c09f | 4545 | arc_read_done, buf, priority, zio_flags, zb); |
34dc7c2f | 4546 | |
2a432414 | 4547 | if (*arc_flags & ARC_FLAG_WAIT) { |
1421c891 PS |
4548 | rc = zio_wait(rzio); |
4549 | goto out; | |
4550 | } | |
34dc7c2f | 4551 | |
2a432414 | 4552 | ASSERT(*arc_flags & ARC_FLAG_NOWAIT); |
34dc7c2f BB |
4553 | zio_nowait(rzio); |
4554 | } | |
1421c891 PS |
4555 | |
4556 | out: | |
4557 | spa_read_history_add(spa, zb, *arc_flags); | |
4558 | return (rc); | |
34dc7c2f BB |
4559 | } |
4560 | ||
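/*
 * Editorial sketch (not part of the original source): the ARC_FLAG_NOWAIT
 * pattern described in the block comment above arc_read().  Both
 * example_* names are hypothetical.  The done callback runs with
 * zio == NULL on a cache hit and with the completed zio on a miss, and is
 * responsible for dropping the reference arc_read() took with `private'.
 */
static void
example_read_done(zio_t *zio, arc_buf_t *buf, void *arg)
{
	if (zio == NULL || zio->io_error == 0) {
		/* consume buf->b_data here */
	}
	VERIFY(arc_buf_remove_ref(buf, arg));
}

static void
example_read_block_async(spa_t *spa, const blkptr_t *bp,
    const zbookmark_phys_t *zb, void *tag)
{
	arc_flags_t aflags = ARC_FLAG_NOWAIT;

	(void) arc_read(NULL, spa, bp, example_read_done, tag,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
}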
ab26409d BB |
4561 | arc_prune_t * |
4562 | arc_add_prune_callback(arc_prune_func_t *func, void *private) | |
4563 | { | |
4564 | arc_prune_t *p; | |
4565 | ||
d1d7e268 | 4566 | p = kmem_alloc(sizeof (*p), KM_SLEEP); |
ab26409d BB |
4567 | p->p_pfunc = func; |
4568 | p->p_private = private; | |
4569 | list_link_init(&p->p_node); | |
4570 | refcount_create(&p->p_refcnt); | |
4571 | ||
4572 | mutex_enter(&arc_prune_mtx); | |
4573 | refcount_add(&p->p_refcnt, &arc_prune_list); | |
4574 | list_insert_head(&arc_prune_list, p); | |
4575 | mutex_exit(&arc_prune_mtx); | |
4576 | ||
4577 | return (p); | |
4578 | } | |
4579 | ||
4580 | void | |
4581 | arc_remove_prune_callback(arc_prune_t *p) | |
4582 | { | |
4583 | mutex_enter(&arc_prune_mtx); | |
4584 | list_remove(&arc_prune_list, p); | |
4585 | if (refcount_remove(&p->p_refcnt, &arc_prune_list) == 0) { | |
4586 | refcount_destroy(&p->p_refcnt); | |
4587 | kmem_free(p, sizeof (*p)); | |
4588 | } | |
4589 | mutex_exit(&arc_prune_mtx); | |
4590 | } | |
4591 | ||
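/*
 * Editorial sketch (not part of the original source): registering and
 * tearing down a prune callback.  All example_* names are hypothetical,
 * and the callback assumes the arc_prune_func_t signature
 * void (int64_t, void *): the ARC invokes it to ask the owner to release
 * references that pin metadata buffers in the cache.
 */
/* ARGSUSED */
static void
example_prune_func(int64_t nr_to_scan, void *private)
{
	/* drop up to nr_to_scan of the owner's metadata references */
}

static void
example_prune_usage(void)
{
	arc_prune_t *p;

	p = arc_add_prune_callback(example_prune_func, NULL);
	/* ... normal operation ... */
	arc_remove_prune_callback(p);
}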
34dc7c2f BB |
4592 | void |
4593 | arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) | |
4594 | { | |
4595 | ASSERT(buf->b_hdr != NULL); | |
b9541d6b CW |
4596 | ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon); |
4597 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) || | |
4598 | func == NULL); | |
428870ff BB |
4599 | ASSERT(buf->b_efunc == NULL); |
4600 | ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); | |
4601 | ||
34dc7c2f BB |
4602 | buf->b_efunc = func; |
4603 | buf->b_private = private; | |
4604 | } | |
4605 | ||
df4474f9 MA |
4606 | /* |
4607 | * Notify the arc that a block was freed, and thus will never be used again. | |
4608 | */ | |
4609 | void | |
4610 | arc_freed(spa_t *spa, const blkptr_t *bp) | |
4611 | { | |
4612 | arc_buf_hdr_t *hdr; | |
4613 | kmutex_t *hash_lock; | |
4614 | uint64_t guid = spa_load_guid(spa); | |
4615 | ||
9b67f605 MA |
4616 | ASSERT(!BP_IS_EMBEDDED(bp)); |
4617 | ||
4618 | hdr = buf_hash_find(guid, bp, &hash_lock); | |
df4474f9 MA |
4619 | if (hdr == NULL) |
4620 | return; | |
4621 | if (HDR_BUF_AVAILABLE(hdr)) { | |
b9541d6b | 4622 | arc_buf_t *buf = hdr->b_l1hdr.b_buf; |
df4474f9 | 4623 | add_reference(hdr, hash_lock, FTAG); |
2a432414 | 4624 | hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE; |
df4474f9 MA |
4625 | mutex_exit(hash_lock); |
4626 | ||
4627 | arc_release(buf, FTAG); | |
4628 | (void) arc_buf_remove_ref(buf, FTAG); | |
4629 | } else { | |
4630 | mutex_exit(hash_lock); | |
4631 | } | |
4632 | ||
4633 | } | |
4634 | ||
34dc7c2f | 4635 | /* |
bd089c54 MA |
4636 | * Clear the user eviction callback set by arc_set_callback(), first calling |
4637 | * it if it exists. Because the presence of a callback keeps an arc_buf cached,
4638 | * clearing the callback may result in the arc_buf being destroyed. However,
4639 | * it will not result in the *last* arc_buf being destroyed, hence the data
4640 | * will remain cached in the ARC. We make local copies of the callback and its
4641 | * argument here so that we can process the callback without holding any locks.
4642 | * | |
4643 | * It's possible that the callback is already in the process of being cleared | |
4644 | * by another thread. In this case we can not clear the callback. | |
4645 | * | |
4646 | * Returns B_TRUE if the callback was successfully called and cleared. | |
34dc7c2f | 4647 | */ |
bd089c54 MA |
4648 | boolean_t |
4649 | arc_clear_callback(arc_buf_t *buf) | |
34dc7c2f BB |
4650 | { |
4651 | arc_buf_hdr_t *hdr; | |
4652 | kmutex_t *hash_lock; | |
bd089c54 MA |
4653 | arc_evict_func_t *efunc = buf->b_efunc; |
4654 | void *private = buf->b_private; | |
34dc7c2f | 4655 | |
428870ff | 4656 | mutex_enter(&buf->b_evict_lock); |
34dc7c2f BB |
4657 | hdr = buf->b_hdr; |
4658 | if (hdr == NULL) { | |
4659 | /* | |
4660 | * We are in arc_do_user_evicts(). | |
4661 | */ | |
4662 | ASSERT(buf->b_data == NULL); | |
428870ff | 4663 | mutex_exit(&buf->b_evict_lock); |
bd089c54 | 4664 | return (B_FALSE); |
b128c09f | 4665 | } else if (buf->b_data == NULL) { |
34dc7c2f | 4666 | /* |
b128c09f BB |
4667 | * We are on the eviction list; process this buffer now |
4668 | * but let arc_do_user_evicts() do the reaping. | |
34dc7c2f | 4669 | */ |
b128c09f | 4670 | buf->b_efunc = NULL; |
428870ff | 4671 | mutex_exit(&buf->b_evict_lock); |
bd089c54 MA |
4672 | VERIFY0(efunc(private)); |
4673 | return (B_TRUE); | |
34dc7c2f | 4674 | } |
b128c09f BB |
4675 | hash_lock = HDR_LOCK(hdr); |
4676 | mutex_enter(hash_lock); | |
428870ff BB |
4677 | hdr = buf->b_hdr; |
4678 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); | |
34dc7c2f | 4679 | |
b9541d6b CW |
4680 | ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <, |
4681 | hdr->b_l1hdr.b_datacnt); | |
4682 | ASSERT(hdr->b_l1hdr.b_state == arc_mru || | |
4683 | hdr->b_l1hdr.b_state == arc_mfu); | |
34dc7c2f | 4684 | |
bd089c54 MA |
4685 | buf->b_efunc = NULL; |
4686 | buf->b_private = NULL; | |
34dc7c2f | 4687 | |
b9541d6b | 4688 | if (hdr->b_l1hdr.b_datacnt > 1) { |
bd089c54 | 4689 | mutex_exit(&buf->b_evict_lock); |
ca0bf58d | 4690 | arc_buf_destroy(buf, TRUE); |
bd089c54 | 4691 | } else { |
b9541d6b | 4692 | ASSERT(buf == hdr->b_l1hdr.b_buf); |
2a432414 | 4693 | hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE; |
bd089c54 | 4694 | mutex_exit(&buf->b_evict_lock); |
34dc7c2f | 4695 | } |
34dc7c2f | 4696 | |
bd089c54 MA |
4697 | mutex_exit(hash_lock); |
4698 | VERIFY0(efunc(private)); | |
4699 | return (B_TRUE); | |
34dc7c2f BB |
4700 | } |
4701 | ||
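/*
 * Editorial sketch (not part of the original source): a minimal
 * arc_set_callback()/arc_clear_callback() pairing.  The callback and the
 * cached-pointer variable are hypothetical; note the efunc must return 0,
 * since arc_clear_callback() VERIFYs its result.
 */
static arc_buf_t *example_cached_buf;

static int
example_evict_func(void *private)
{
	/* the ARC is evicting the buffer, so forget our cached pointer */
	*(arc_buf_t **)private = NULL;
	return (0);
}

/*
 * After a successful read:
 *	example_cached_buf = buf;
 *	arc_set_callback(buf, example_evict_func, &example_cached_buf);
 * and on teardown:
 *	(void) arc_clear_callback(buf);
 */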
4702 | /* | |
e49f1e20 WA |
4703 | * Release this buffer from the cache, making it an anonymous buffer. This |
4704 | * must be done after a read and prior to modifying the buffer contents. | |
34dc7c2f | 4705 | * If the buffer has more than one reference, we must make |
b128c09f | 4706 | * a new hdr for the buffer. |
34dc7c2f BB |
4707 | */ |
4708 | void | |
4709 | arc_release(arc_buf_t *buf, void *tag) | |
4710 | { | |
b9541d6b CW |
4711 | kmutex_t *hash_lock; |
4712 | arc_state_t *state; | |
4713 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
34dc7c2f | 4714 | |
428870ff | 4715 | /* |
ca0bf58d | 4716 | * It would be nice to assert that if it's DMU metadata (level >
428870ff BB |
4717 | * 0 || it's the dnode file), then it must be syncing context. |
4718 | * But we don't know that information at this level. | |
4719 | */ | |
4720 | ||
4721 | mutex_enter(&buf->b_evict_lock); | |
b128c09f | 4722 | |
ca0bf58d PS |
4723 | ASSERT(HDR_HAS_L1HDR(hdr)); |
4724 | ||
b9541d6b CW |
4725 | /* |
4726 | * We don't grab the hash lock prior to this check, because if | |
4727 | * the buffer's header is in the arc_anon state, it won't be | |
4728 | * linked into the hash table. | |
4729 | */ | |
4730 | if (hdr->b_l1hdr.b_state == arc_anon) { | |
4731 | mutex_exit(&buf->b_evict_lock); | |
4732 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
4733 | ASSERT(!HDR_IN_HASH_TABLE(hdr)); | |
4734 | ASSERT(!HDR_HAS_L2HDR(hdr)); | |
4735 | ASSERT(BUF_EMPTY(hdr)); | |
34dc7c2f | 4736 | |
b9541d6b CW |
4737 | ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1); |
4738 | ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); | |
4739 | ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); | |
4740 | ||
4741 | ASSERT3P(buf->b_efunc, ==, NULL); | |
4742 | ASSERT3P(buf->b_private, ==, NULL); | |
4743 | ||
4744 | hdr->b_l1hdr.b_arc_access = 0; | |
4745 | arc_buf_thaw(buf); | |
4746 | ||
4747 | return; | |
34dc7c2f BB |
4748 | } |
4749 | ||
b9541d6b CW |
4750 | hash_lock = HDR_LOCK(hdr); |
4751 | mutex_enter(hash_lock); | |
4752 | ||
4753 | /* | |
4754 | * This assignment is only valid as long as the hash_lock is | |
4755 | * held, we must be careful not to reference state or the | |
4756 | * b_state field after dropping the lock. | |
4757 | */ | |
4758 | state = hdr->b_l1hdr.b_state; | |
4759 | ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); | |
4760 | ASSERT3P(state, !=, arc_anon); | |
4761 | ||
4762 | /* this buffer is not on any list */ | |
4763 | ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0); | |
4764 | ||
4765 | if (HDR_HAS_L2HDR(hdr)) { | |
b9541d6b | 4766 | mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); |
ca0bf58d PS |
4767 | |
4768 | /* | |
d962d5da PS |
4769 | * We have to recheck this conditional again now that |
4770 | * we're holding the l2ad_mtx to prevent a race with | |
4771 | * another thread which might be concurrently calling | |
4772 | * l2arc_evict(). In that case, l2arc_evict() might have | |
4773 | * destroyed the header's L2 portion as we were waiting | |
4774 | * to acquire the l2ad_mtx. | |
ca0bf58d | 4775 | */ |
d962d5da PS |
4776 | if (HDR_HAS_L2HDR(hdr)) |
4777 | arc_hdr_l2hdr_destroy(hdr); | |
ca0bf58d | 4778 | |
b9541d6b | 4779 | mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); |
b128c09f BB |
4780 | } |
4781 | ||
34dc7c2f BB |
4782 | /* |
4783 | * Do we have more than one buf? | |
4784 | */ | |
b9541d6b | 4785 | if (hdr->b_l1hdr.b_datacnt > 1) { |
34dc7c2f BB |
4786 | arc_buf_hdr_t *nhdr; |
4787 | arc_buf_t **bufp; | |
4788 | uint64_t blksz = hdr->b_size; | |
d164b209 | 4789 | uint64_t spa = hdr->b_spa; |
b9541d6b | 4790 | arc_buf_contents_t type = arc_buf_type(hdr); |
34dc7c2f BB |
4791 | uint32_t flags = hdr->b_flags; |
4792 | ||
b9541d6b | 4793 | ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); |
34dc7c2f | 4794 | /* |
428870ff BB |
4795 | * Pull the data off this hdr and attach it to
4796 | * a new anonymous hdr. | |
34dc7c2f BB |
4797 | */ |
4798 | (void) remove_reference(hdr, hash_lock, tag); | |
b9541d6b | 4799 | bufp = &hdr->b_l1hdr.b_buf; |
34dc7c2f BB |
4800 | while (*bufp != buf) |
4801 | bufp = &(*bufp)->b_next; | |
428870ff | 4802 | *bufp = buf->b_next; |
34dc7c2f BB |
4803 | buf->b_next = NULL; |
4804 | ||
b9541d6b | 4805 | ASSERT3P(state, !=, arc_l2c_only); |
36da08ef PS |
4806 | |
4807 | (void) refcount_remove_many( | |
4808 | &state->arcs_size, hdr->b_size, buf); | |
4809 | ||
b9541d6b CW |
4810 | if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { |
4811 | uint64_t *size; | |
4812 | ||
4813 | ASSERT3P(state, !=, arc_l2c_only); | |
4814 | size = &state->arcs_lsize[type]; | |
34dc7c2f BB |
4815 | ASSERT3U(*size, >=, hdr->b_size); |
4816 | atomic_add_64(size, -hdr->b_size); | |
4817 | } | |
1eb5bfa3 GW |
4818 | |
4819 | /* | |
4820 | * We're releasing a duplicate user data buffer, update | |
4821 | * our statistics accordingly. | |
4822 | */ | |
b9541d6b | 4823 | if (HDR_ISTYPE_DATA(hdr)) { |
1eb5bfa3 GW |
4824 | ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); |
4825 | ARCSTAT_INCR(arcstat_duplicate_buffers_size, | |
4826 | -hdr->b_size); | |
4827 | } | |
b9541d6b | 4828 | hdr->b_l1hdr.b_datacnt -= 1; |
34dc7c2f | 4829 | arc_cksum_verify(buf); |
498877ba | 4830 | arc_buf_unwatch(buf); |
34dc7c2f BB |
4831 | |
4832 | mutex_exit(hash_lock); | |
4833 | ||
b9541d6b | 4834 | nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); |
34dc7c2f BB |
4835 | nhdr->b_size = blksz; |
4836 | nhdr->b_spa = spa; | |
b9541d6b CW |
4837 | |
4838 | nhdr->b_l1hdr.b_mru_hits = 0; | |
4839 | nhdr->b_l1hdr.b_mru_ghost_hits = 0; | |
4840 | nhdr->b_l1hdr.b_mfu_hits = 0; | |
4841 | nhdr->b_l1hdr.b_mfu_ghost_hits = 0; | |
4842 | nhdr->b_l1hdr.b_l2_hits = 0; | |
2a432414 | 4843 | nhdr->b_flags = flags & ARC_FLAG_L2_WRITING; |
b9541d6b CW |
4844 | nhdr->b_flags |= arc_bufc_to_flags(type); |
4845 | nhdr->b_flags |= ARC_FLAG_HAS_L1HDR; | |
4846 | ||
4847 | nhdr->b_l1hdr.b_buf = buf; | |
4848 | nhdr->b_l1hdr.b_datacnt = 1; | |
4849 | nhdr->b_l1hdr.b_state = arc_anon; | |
4850 | nhdr->b_l1hdr.b_arc_access = 0; | |
ca0bf58d | 4851 | nhdr->b_l1hdr.b_tmp_cdata = NULL; |
34dc7c2f | 4852 | nhdr->b_freeze_cksum = NULL; |
b9541d6b CW |
4853 | |
4854 | (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); | |
34dc7c2f | 4855 | buf->b_hdr = nhdr; |
428870ff | 4856 | mutex_exit(&buf->b_evict_lock); |
36da08ef | 4857 | (void) refcount_add_many(&arc_anon->arcs_size, blksz, buf); |
34dc7c2f | 4858 | } else { |
428870ff | 4859 | mutex_exit(&buf->b_evict_lock); |
b9541d6b | 4860 | ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); |
ca0bf58d PS |
4861 | /* protected by hash lock, or hdr is on arc_anon */ |
4862 | ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); | |
34dc7c2f | 4863 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); |
b9541d6b CW |
4864 | hdr->b_l1hdr.b_mru_hits = 0; |
4865 | hdr->b_l1hdr.b_mru_ghost_hits = 0; | |
4866 | hdr->b_l1hdr.b_mfu_hits = 0; | |
4867 | hdr->b_l1hdr.b_mfu_ghost_hits = 0; | |
4868 | hdr->b_l1hdr.b_l2_hits = 0; | |
4869 | arc_change_state(arc_anon, hdr, hash_lock); | |
4870 | hdr->b_l1hdr.b_arc_access = 0; | |
4871 | mutex_exit(hash_lock); | |
34dc7c2f | 4872 | |
428870ff | 4873 | buf_discard_identity(hdr); |
34dc7c2f BB |
4874 | arc_buf_thaw(buf); |
4875 | } | |
4876 | buf->b_efunc = NULL; | |
4877 | buf->b_private = NULL; | |
34dc7c2f BB |
4878 | } |
4879 | ||
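/*
 * Editorial sketch (not part of the original source): the intended
 * read-modify-write ordering around arc_release().  The helper is
 * hypothetical; the point is that a buffer must be made anonymous after
 * the read and before its contents are dirtied.
 */
static void
example_modify_buf(arc_buf_t *buf, void *tag, const void *src, uint64_t len)
{
	arc_release(buf, tag);		/* detach from the cached identity */
	ASSERT(arc_released(buf));
	bcopy(src, buf->b_data, len);	/* now safe to modify the data */
}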
4880 | int | |
4881 | arc_released(arc_buf_t *buf) | |
4882 | { | |
b128c09f BB |
4883 | int released; |
4884 | ||
428870ff | 4885 | mutex_enter(&buf->b_evict_lock); |
b9541d6b CW |
4886 | released = (buf->b_data != NULL && |
4887 | buf->b_hdr->b_l1hdr.b_state == arc_anon); | |
428870ff | 4888 | mutex_exit(&buf->b_evict_lock); |
b128c09f | 4889 | return (released); |
34dc7c2f BB |
4890 | } |
4891 | ||
34dc7c2f BB |
4892 | #ifdef ZFS_DEBUG |
4893 | int | |
4894 | arc_referenced(arc_buf_t *buf) | |
4895 | { | |
b128c09f BB |
4896 | int referenced; |
4897 | ||
428870ff | 4898 | mutex_enter(&buf->b_evict_lock); |
b9541d6b | 4899 | referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); |
428870ff | 4900 | mutex_exit(&buf->b_evict_lock); |
b128c09f | 4901 | return (referenced); |
34dc7c2f BB |
4902 | } |
4903 | #endif | |
4904 | ||
4905 | static void | |
4906 | arc_write_ready(zio_t *zio) | |
4907 | { | |
4908 | arc_write_callback_t *callback = zio->io_private; | |
4909 | arc_buf_t *buf = callback->awcb_buf; | |
4910 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
4911 | ||
b9541d6b CW |
4912 | ASSERT(HDR_HAS_L1HDR(hdr)); |
4913 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); | |
4914 | ASSERT(hdr->b_l1hdr.b_datacnt > 0); | |
b128c09f BB |
4915 | callback->awcb_ready(zio, buf, callback->awcb_private); |
4916 | ||
34dc7c2f BB |
4917 | /* |
4918 | * If the IO is already in progress, then this is a re-write | |
b128c09f BB |
4919 | * attempt, so we need to thaw and re-compute the cksum. |
4920 | * It is the responsibility of the callback to handle the | |
4921 | * accounting for any re-write attempt. | |
34dc7c2f BB |
4922 | */ |
4923 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
b9541d6b | 4924 | mutex_enter(&hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
4925 | if (hdr->b_freeze_cksum != NULL) { |
4926 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
4927 | hdr->b_freeze_cksum = NULL; | |
4928 | } | |
b9541d6b | 4929 | mutex_exit(&hdr->b_l1hdr.b_freeze_lock); |
34dc7c2f BB |
4930 | } |
4931 | arc_cksum_compute(buf, B_FALSE); | |
2a432414 | 4932 | hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS; |
34dc7c2f BB |
4933 | } |
4934 | ||
e8b96c60 MA |
4935 | /* |
4936 | * The SPA calls this callback for each physical write that happens on behalf | |
4937 | * of a logical write. See the comment in dbuf_write_physdone() for details. | |
4938 | */ | |
4939 | static void | |
4940 | arc_write_physdone(zio_t *zio) | |
4941 | { | |
4942 | arc_write_callback_t *cb = zio->io_private; | |
4943 | if (cb->awcb_physdone != NULL) | |
4944 | cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); | |
4945 | } | |
4946 | ||
34dc7c2f BB |
4947 | static void |
4948 | arc_write_done(zio_t *zio) | |
4949 | { | |
4950 | arc_write_callback_t *callback = zio->io_private; | |
4951 | arc_buf_t *buf = callback->awcb_buf; | |
4952 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
4953 | ||
b9541d6b | 4954 | ASSERT(hdr->b_l1hdr.b_acb == NULL); |
428870ff BB |
4955 | |
4956 | if (zio->io_error == 0) { | |
9b67f605 | 4957 | if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { |
b0bc7a84 MG |
4958 | buf_discard_identity(hdr); |
4959 | } else { | |
4960 | hdr->b_dva = *BP_IDENTITY(zio->io_bp); | |
4961 | hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); | |
b0bc7a84 | 4962 | } |
428870ff BB |
4963 | } else { |
4964 | ASSERT(BUF_EMPTY(hdr)); | |
4965 | } | |
34dc7c2f | 4966 | |
34dc7c2f | 4967 | /* |
9b67f605 MA |
4968 | * If the block to be written was all-zero or compressed enough to be |
4969 | * embedded in the BP, no write was performed so there will be no | |
4970 | * dva/birth/checksum. The buffer must therefore remain anonymous | |
4971 | * (and uncached). | |
34dc7c2f BB |
4972 | */ |
4973 | if (!BUF_EMPTY(hdr)) { | |
4974 | arc_buf_hdr_t *exists; | |
4975 | kmutex_t *hash_lock; | |
4976 | ||
428870ff BB |
4977 | ASSERT(zio->io_error == 0); |
4978 | ||
34dc7c2f BB |
4979 | arc_cksum_verify(buf); |
4980 | ||
4981 | exists = buf_hash_insert(hdr, &hash_lock); | |
b9541d6b | 4982 | if (exists != NULL) { |
34dc7c2f BB |
4983 | /* |
4984 | * This can only happen if we overwrite for | |
4985 | * sync-to-convergence, because we remove | |
4986 | * buffers from the hash table when we arc_free(). | |
4987 | */ | |
428870ff BB |
4988 | if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { |
4989 | if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) | |
4990 | panic("bad overwrite, hdr=%p exists=%p", | |
4991 | (void *)hdr, (void *)exists); | |
b9541d6b CW |
4992 | ASSERT(refcount_is_zero( |
4993 | &exists->b_l1hdr.b_refcnt)); | |
428870ff BB |
4994 | arc_change_state(arc_anon, exists, hash_lock); |
4995 | mutex_exit(hash_lock); | |
4996 | arc_hdr_destroy(exists); | |
4997 | exists = buf_hash_insert(hdr, &hash_lock); | |
4998 | ASSERT3P(exists, ==, NULL); | |
03c6040b GW |
4999 | } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { |
5000 | /* nopwrite */ | |
5001 | ASSERT(zio->io_prop.zp_nopwrite); | |
5002 | if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) | |
5003 | panic("bad nopwrite, hdr=%p exists=%p", | |
5004 | (void *)hdr, (void *)exists); | |
428870ff BB |
5005 | } else { |
5006 | /* Dedup */ | |
b9541d6b CW |
5007 | ASSERT(hdr->b_l1hdr.b_datacnt == 1); |
5008 | ASSERT(hdr->b_l1hdr.b_state == arc_anon); | |
428870ff BB |
5009 | ASSERT(BP_GET_DEDUP(zio->io_bp)); |
5010 | ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); | |
5011 | } | |
34dc7c2f | 5012 | } |
2a432414 | 5013 | hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; |
b128c09f | 5014 | /* if it's not anon, we are doing a scrub */ |
b9541d6b | 5015 | if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) |
b128c09f | 5016 | arc_access(hdr, hash_lock); |
34dc7c2f | 5017 | mutex_exit(hash_lock); |
34dc7c2f | 5018 | } else { |
2a432414 | 5019 | hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS; |
34dc7c2f BB |
5020 | } |
5021 | ||
b9541d6b | 5022 | ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); |
428870ff | 5023 | callback->awcb_done(zio, buf, callback->awcb_private); |
34dc7c2f BB |
5024 | |
5025 | kmem_free(callback, sizeof (arc_write_callback_t)); | |
5026 | } | |
5027 | ||
5028 | zio_t * | |
428870ff | 5029 | arc_write(zio_t *pio, spa_t *spa, uint64_t txg, |
3a17a7a9 | 5030 | blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress, |
e8b96c60 MA |
5031 | const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone, |
5032 | arc_done_func_t *done, void *private, zio_priority_t priority, | |
5dbd68a3 | 5033 | int zio_flags, const zbookmark_phys_t *zb) |
34dc7c2f BB |
5034 | { |
5035 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
5036 | arc_write_callback_t *callback; | |
b128c09f | 5037 | zio_t *zio; |
34dc7c2f | 5038 | |
b128c09f | 5039 | ASSERT(ready != NULL); |
428870ff | 5040 | ASSERT(done != NULL); |
34dc7c2f | 5041 | ASSERT(!HDR_IO_ERROR(hdr)); |
b9541d6b CW |
5042 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); |
5043 | ASSERT(hdr->b_l1hdr.b_acb == NULL); | |
5044 | ASSERT(hdr->b_l1hdr.b_datacnt > 0); | |
b128c09f | 5045 | if (l2arc) |
2a432414 | 5046 | hdr->b_flags |= ARC_FLAG_L2CACHE; |
3a17a7a9 | 5047 | if (l2arc_compress) |
2a432414 | 5048 | hdr->b_flags |= ARC_FLAG_L2COMPRESS; |
79c76d5b | 5049 | callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); |
34dc7c2f | 5050 | callback->awcb_ready = ready; |
e8b96c60 | 5051 | callback->awcb_physdone = physdone; |
34dc7c2f BB |
5052 | callback->awcb_done = done; |
5053 | callback->awcb_private = private; | |
5054 | callback->awcb_buf = buf; | |
b128c09f | 5055 | |
428870ff | 5056 | zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, |
e8b96c60 MA |
5057 | arc_write_ready, arc_write_physdone, arc_write_done, callback, |
5058 | priority, zio_flags, zb); | |
34dc7c2f BB |
5059 | |
5060 | return (zio); | |
5061 | } | |
5062 | ||
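/*
 * Editorial sketch (not part of the original source): a minimal
 * synchronous caller of arc_write().  All example_* names are
 * hypothetical; arc_write() requires both a ready and a done callback,
 * and the zio it returns must still be issued by the caller (here via
 * zio_wait()).
 */
/* ARGSUSED */
static void
example_write_ready(zio_t *zio, arc_buf_t *buf, void *arg)
{
	/* last chance to fill in buf->b_data before it is written */
}

/* ARGSUSED */
static void
example_write_done(zio_t *zio, arc_buf_t *buf, void *arg)
{
	/* zio->io_bp now describes the newly written block */
}

static int
example_write_sync(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, const zio_prop_t *zp, const zbookmark_phys_t *zb)
{
	zio_t *zio = arc_write(pio, spa, txg, bp, buf,
	    B_TRUE /* l2arc */, B_FALSE /* l2arc_compress */, zp,
	    example_write_ready, NULL /* physdone */, example_write_done,
	    NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);

	return (zio_wait(zio));
}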
34dc7c2f | 5063 | static int |
e8b96c60 | 5064 | arc_memory_throttle(uint64_t reserve, uint64_t txg) |
34dc7c2f BB |
5065 | { |
5066 | #ifdef _KERNEL | |
7e8bddd0 BB |
5067 | uint64_t available_memory = ptob(freemem); |
5068 | static uint64_t page_load = 0; | |
5069 | static uint64_t last_txg = 0; | |
5070 | #ifdef __linux__ | |
5071 | pgcnt_t minfree = btop(arc_sys_free / 4); | |
5072 | #endif | |
0c5493d4 | 5073 | |
ca67b33a MA |
5074 | if (freemem > physmem * arc_lotsfree_percent / 100) |
5075 | return (0); | |
5076 | ||
7e8bddd0 BB |
5077 | if (txg > last_txg) { |
5078 | last_txg = txg; | |
5079 | page_load = 0; | |
5080 | } | |
5081 | ||
5082 | /* | |
5083 | * If we are in pageout, we know that memory is already tight, | |
5084 | * and the ARC is already evicting, so we just want to
5085 | * continue to let page writes occur as quickly as possible. | |
5086 | */ | |
5087 | if (current_is_kswapd()) { | |
5088 | if (page_load > MAX(ptob(minfree), available_memory) / 4) { | |
5089 | DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim); | |
5090 | return (SET_ERROR(ERESTART)); | |
5091 | } | |
5092 | /* Note: reserve is inflated, so we deflate */ | |
5093 | page_load += reserve / 8; | |
5094 | return (0); | |
5095 | } else if (page_load > 0 && arc_reclaim_needed()) { | |
ca67b33a | 5096 | /* memory is low, delay before restarting */ |
34dc7c2f | 5097 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); |
570827e1 | 5098 | DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim); |
2e528b49 | 5099 | return (SET_ERROR(EAGAIN)); |
34dc7c2f | 5100 | } |
7e8bddd0 | 5101 | page_load = 0; |
34dc7c2f BB |
5102 | #endif |
5103 | return (0); | |
5104 | } | |
5105 | ||
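/*
 * Editorial note (not part of the original source), restating the kswapd
 * branch above: each reservation adds reserve/8 to page_load (the reserve
 * is inflated, hence the deflation), page_load is reset on the first
 * reservation of each new txg, and ERESTART is returned once page_load
 * exceeds a quarter of MAX(ptob(minfree), available_memory).
 */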
5106 | void | |
5107 | arc_tempreserve_clear(uint64_t reserve) | |
5108 | { | |
5109 | atomic_add_64(&arc_tempreserve, -reserve); | |
5110 | ASSERT((int64_t)arc_tempreserve >= 0); | |
5111 | } | |
5112 | ||
5113 | int | |
5114 | arc_tempreserve_space(uint64_t reserve, uint64_t txg) | |
5115 | { | |
5116 | int error; | |
9babb374 | 5117 | uint64_t anon_size; |
34dc7c2f | 5118 | |
34dc7c2f BB |
5119 | if (reserve > arc_c / 4 && !arc_no_grow)
5120 | arc_c = MIN(arc_c_max, reserve * 4); | |
12f9a6a3 BB |
5121 | |
5122 | /* | |
5123 | * Throttle when the calculated memory footprint for the TXG | |
5124 | * exceeds the target ARC size. | |
5125 | */ | |
570827e1 BB |
5126 | if (reserve > arc_c) { |
5127 | DMU_TX_STAT_BUMP(dmu_tx_memory_reserve); | |
12f9a6a3 | 5128 | return (SET_ERROR(ERESTART)); |
570827e1 | 5129 | } |
34dc7c2f | 5130 | |
9babb374 BB |
5131 | /* |
5132 | * Don't count loaned bufs as in flight dirty data to prevent long | |
5133 | * network delays from blocking transactions that are ready to be | |
5134 | * assigned to a txg. | |
5135 | */ | |
36da08ef PS |
5136 | anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) - |
5137 | arc_loaned_bytes), 0); | |
9babb374 | 5138 | |
34dc7c2f BB |
5139 | /* |
5140 | * Writes will, almost always, require additional memory allocations | |
d3cc8b15 | 5141 | * in order to compress/encrypt/etc the data. We therefore need to |
34dc7c2f BB |
5142 | * make sure that there is sufficient available memory for this. |
5143 | */ | |
e8b96c60 MA |
5144 | error = arc_memory_throttle(reserve, txg); |
5145 | if (error != 0) | |
34dc7c2f BB |
5146 | return (error); |
5147 | ||
5148 | /* | |
5149 | * Throttle writes when the amount of dirty data in the cache | |
5150 | * gets too large. We try to keep the cache less than half full | |
5151 | * of dirty blocks so that our sync times don't grow too large. | |
5152 | * Note: if two requests come in concurrently, we might let them | |
5153 | * both succeed, when one of them should fail. Not a huge deal. | |
5154 | */ | |
9babb374 BB |
5155 | |
5156 | if (reserve + arc_tempreserve + anon_size > arc_c / 2 && | |
5157 | anon_size > arc_c / 4) { | |
34dc7c2f BB |
5158 | dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " |
5159 | "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", | |
5160 | arc_tempreserve>>10, | |
5161 | arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, | |
5162 | arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, | |
5163 | reserve>>10, arc_c>>10); | |
570827e1 | 5164 | DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle); |
2e528b49 | 5165 | return (SET_ERROR(ERESTART)); |
34dc7c2f BB |
5166 | } |
5167 | atomic_add_64(&arc_tempreserve, reserve); | |
5168 | return (0); | |
5169 | } | |
5170 | ||
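/*
 * Editorial note (not part of the original source): a worked example of
 * the dirty-data throttle above.  With arc_c = 4 GiB, a reservation fails
 * with ERESTART when reserve + arc_tempreserve + anon_size would exceed
 * 2 GiB (arc_c / 2) while anon_size alone exceeds 1 GiB (arc_c / 4).
 */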
13be560d BB |
5171 | static void |
5172 | arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, | |
5173 | kstat_named_t *evict_data, kstat_named_t *evict_metadata) | |
5174 | { | |
36da08ef | 5175 | size->value.ui64 = refcount_count(&state->arcs_size); |
13be560d BB |
5176 | evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA]; |
5177 | evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA]; | |
5178 | } | |
5179 | ||
5180 | static int | |
5181 | arc_kstat_update(kstat_t *ksp, int rw) | |
5182 | { | |
5183 | arc_stats_t *as = ksp->ks_data; | |
5184 | ||
5185 | if (rw == KSTAT_WRITE) { | |
500445c0 | 5186 | return (EACCES); |
13be560d BB |
5187 | } else { |
5188 | arc_kstat_update_state(arc_anon, | |
5189 | &as->arcstat_anon_size, | |
500445c0 PS |
5190 | &as->arcstat_anon_evictable_data, |
5191 | &as->arcstat_anon_evictable_metadata); | |
13be560d BB |
5192 | arc_kstat_update_state(arc_mru, |
5193 | &as->arcstat_mru_size, | |
500445c0 PS |
5194 | &as->arcstat_mru_evictable_data, |
5195 | &as->arcstat_mru_evictable_metadata); | |
13be560d BB |
5196 | arc_kstat_update_state(arc_mru_ghost, |
5197 | &as->arcstat_mru_ghost_size, | |
500445c0 PS |
5198 | &as->arcstat_mru_ghost_evictable_data, |
5199 | &as->arcstat_mru_ghost_evictable_metadata); | |
13be560d BB |
5200 | arc_kstat_update_state(arc_mfu, |
5201 | &as->arcstat_mfu_size, | |
500445c0 PS |
5202 | &as->arcstat_mfu_evictable_data, |
5203 | &as->arcstat_mfu_evictable_metadata); | |
fc41c640 | 5204 | arc_kstat_update_state(arc_mfu_ghost, |
13be560d | 5205 | &as->arcstat_mfu_ghost_size, |
500445c0 PS |
5206 | &as->arcstat_mfu_ghost_evictable_data, |
5207 | &as->arcstat_mfu_ghost_evictable_metadata); | |
13be560d BB |
5208 | } |
5209 | ||
5210 | return (0); | |
5211 | } | |
5212 | ||
ca0bf58d PS |
5213 | /* |
5214 | * This function *must* return indices evenly distributed between all | |
5215 | * sublists of the multilist. This is needed due to how the ARC eviction | |
5216 | * code is laid out; arc_evict_state() assumes ARC buffers are evenly | |
5217 | * distributed between all sublists and uses this assumption when | |
5218 | * deciding which sublist to evict from and how much to evict from it. | |
5219 | */ | |
5220 | unsigned int | |
5221 | arc_state_multilist_index_func(multilist_t *ml, void *obj) | |
5222 | { | |
5223 | arc_buf_hdr_t *hdr = obj; | |
5224 | ||
5225 | /* | |
5226 | * We rely on b_dva to generate evenly distributed index | |
5227 | * numbers using buf_hash below. So, as an added precaution, | |
5228 | * let's make sure we never add empty buffers to the arc lists. | |
5229 | */ | |
5230 | ASSERT(!BUF_EMPTY(hdr)); | |
5231 | ||
5232 | /* | |
5233 | * The assumption here is that the hash value for a given
5234 | * arc_buf_hdr_t will remain constant throughout its lifetime | |
5235 | * (i.e. its b_spa, b_dva, and b_birth fields don't change). | |
5236 | * Thus, we don't need to store the header's sublist index | |
5237 | * on insertion, as this index can be recalculated on removal. | |
5238 | * | |
5239 | * Also, the low order bits of the hash value are thought to be | |
5240 | * distributed evenly. Otherwise, in the case that the multilist | |
5241 | * has a power of two number of sublists, each sublist's usage
5242 | * would not be evenly distributed. | |
5243 | */ | |
5244 | return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % | |
5245 | multilist_get_num_sublists(ml)); | |
5246 | } | |
5247 | ||
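/*
 * Editorial note (not part of the original source): e.g. with 8 sublists,
 * a header whose buf_hash() value is 0x2f lands on sublist 0x2f % 8 = 7.
 * Only the low-order bits of the hash are consumed, which is why their
 * even distribution matters for power-of-two sublist counts, as noted
 * above.
 */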
ca67b33a MA |
5248 | /* |
5249 | * Called during module initialization and periodically thereafter to | |
5250 | * apply reasonable changes to the exposed performance tunings. Non-zero | |
5251 | * zfs_* values which differ from the currently set values will be applied. | |
5252 | */ | |
5253 | static void | |
5254 | arc_tuning_update(void) | |
5255 | { | |
5256 | /* Valid range: 64M - <all physical memory> */ | |
5257 | if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) && | |
5258 | (zfs_arc_max > 64 << 20) && (zfs_arc_max < ptob(physmem)) && | |
5259 | (zfs_arc_max > arc_c_min)) { | |
5260 | arc_c_max = zfs_arc_max; | |
5261 | arc_c = arc_c_max; | |
5262 | arc_p = (arc_c >> 1); | |
5263 | arc_meta_limit = MIN(arc_meta_limit, arc_c_max); | |
5264 | } | |
5265 | ||
5266 | /* Valid range: 32M - <arc_c_max> */ | |
5267 | if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) && | |
5268 | (zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) && | |
5269 | (zfs_arc_min <= arc_c_max)) { | |
5270 | arc_c_min = zfs_arc_min; | |
5271 | arc_c = MAX(arc_c, arc_c_min); | |
5272 | } | |
5273 | ||
5274 | /* Valid range: 16M - <arc_c_max> */ | |
5275 | if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) && | |
5276 | (zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) && | |
5277 | (zfs_arc_meta_min <= arc_c_max)) { | |
5278 | arc_meta_min = zfs_arc_meta_min; | |
5279 | arc_meta_limit = MAX(arc_meta_limit, arc_meta_min); | |
5280 | } | |
5281 | ||
5282 | /* Valid range: <arc_meta_min> - <arc_c_max> */ | |
5283 | if ((zfs_arc_meta_limit) && (zfs_arc_meta_limit != arc_meta_limit) && | |
5284 | (zfs_arc_meta_limit >= zfs_arc_meta_min) && | |
5285 | (zfs_arc_meta_limit <= arc_c_max)) | |
5286 | arc_meta_limit = zfs_arc_meta_limit; | |
5287 | ||
5288 | /* Valid range: 1 - N */ | |
5289 | if (zfs_arc_grow_retry) | |
5290 | arc_grow_retry = zfs_arc_grow_retry; | |
5291 | ||
5292 | /* Valid range: 1 - N */ | |
5293 | if (zfs_arc_shrink_shift) { | |
5294 | arc_shrink_shift = zfs_arc_shrink_shift; | |
5295 | arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
5296 | } | |
5297 | ||
728d6ae9 BB |
5298 | /* Valid range: 1 - N */ |
5299 | if (zfs_arc_p_min_shift) | |
5300 | arc_p_min_shift = zfs_arc_p_min_shift; | |
5301 | ||
ca67b33a MA |
5302 | /* Valid range: 1 - N ticks */ |
5303 | if (zfs_arc_min_prefetch_lifespan) | |
5304 | arc_min_prefetch_lifespan = zfs_arc_min_prefetch_lifespan; | |
11f552fa | 5305 | |
7e8bddd0 BB |
5306 | /* Valid range: 0 - 100 */ |
5307 | if ((zfs_arc_lotsfree_percent >= 0) && | |
5308 | (zfs_arc_lotsfree_percent <= 100)) | |
5309 | arc_lotsfree_percent = zfs_arc_lotsfree_percent; | |
5310 | ||
11f552fa BB |
5311 | /* Valid range: 0 - <all physical memory> */ |
5312 | if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free)) | |
5313 | arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), ptob(physmem)); | |
7e8bddd0 | 5314 | |
ca67b33a MA |
5315 | } |
5316 | ||
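/*
 * Editorial note (not part of the original source): on Linux the zfs_*
 * knobs consumed above are exposed as module parameters (e.g.
 * /sys/module/zfs/parameters/zfs_arc_max), and since arc_tuning_update()
 * also runs periodically after module load, writable parameters take
 * effect on a live system without reloading the module.
 */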
34dc7c2f BB |
5317 | void |
5318 | arc_init(void) | |
5319 | { | |
ca67b33a MA |
5320 | /* |
5321 | * allmem is "all memory that we could possibly use". | |
5322 | */ | |
5323 | #ifdef _KERNEL | |
5324 | uint64_t allmem = ptob(physmem); | |
5325 | #else | |
5326 | uint64_t allmem = (physmem * PAGESIZE) / 2; | |
5327 | #endif | |
5328 | ||
ca0bf58d PS |
5329 | mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL); |
5330 | cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL); | |
5331 | cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL); | |
5332 | ||
5333 | mutex_init(&arc_user_evicts_lock, NULL, MUTEX_DEFAULT, NULL); | |
5334 | cv_init(&arc_user_evicts_cv, NULL, CV_DEFAULT, NULL); | |
34dc7c2f BB |
5335 | |
5336 | /* Convert seconds to clock ticks */ | |
ca67b33a | 5337 | arc_min_prefetch_lifespan = 1 * hz; |
34dc7c2f BB |
5338 | |
5339 | /* Start out with 1/8 of all memory */ | |
ca67b33a | 5340 | arc_c = allmem / 8; |
34dc7c2f BB |
5341 | |
5342 | #ifdef _KERNEL | |
5343 | /* | |
5344 | * On architectures where the physical memory can be larger | |
5345 | * than the addressable space (Intel in 32-bit mode), we may
5346 | * need to limit the cache to 1/8 of VM size. | |
5347 | */ | |
5348 | arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); | |
ca67b33a | 5349 | |
7cb67b45 BB |
5350 | /* |
5351 | * Register a shrinker to support synchronous (direct) memory | |
5352 | * reclaim from the arc. This is done to prevent kswapd from | |
5353 | * swapping out pages when it is preferable to shrink the arc. | |
5354 | */ | |
5355 | spl_register_shrinker(&arc_shrinker); | |
11f552fa BB |
5356 | |
5357 | /* Set to 1/64 of all memory or a minimum of 512K */ | |
5358 | arc_sys_free = MAX(ptob(physmem / 64), (512 * 1024)); | |
5359 | arc_need_free = 0; | |
34dc7c2f BB |
5360 | #endif |
5361 | ||
ca67b33a | 5362 | /* Set min cache to allow safe operation of arc_adapt() */ |
121b3cae | 5363 | arc_c_min = 2ULL << SPA_MAXBLOCKSHIFT; |
ca67b33a MA |
5364 | /* Set max to 1/2 of all memory */ |
5365 | arc_c_max = allmem / 2; | |
34dc7c2f BB |
5366 | |
5367 | arc_c = arc_c_max; | |
5368 | arc_p = (arc_c >> 1); | |
5369 | ||
ca67b33a MA |
5370 | /* Set min to 1/2 of arc_c_min */ |
5371 | arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT; | |
5372 | /* Initialize maximum observed usage to zero */ | |
1834f2d8 | 5373 | arc_meta_max = 0; |
ca67b33a MA |
5374 | /* Set limit to 3/4 of arc_c_max with a floor of arc_meta_min */ |
5375 | arc_meta_limit = MAX((3 * arc_c_max) / 4, arc_meta_min); | |
34dc7c2f | 5376 | |
ca67b33a MA |
5377 | /* Apply user specified tunings */ |
5378 | arc_tuning_update(); | |
c52fca13 | 5379 | |
ca0bf58d | 5380 | if (zfs_arc_num_sublists_per_state < 1) |
ca67b33a | 5381 | zfs_arc_num_sublists_per_state = MAX(boot_ncpus, 1); |
ca0bf58d | 5382 | |
34dc7c2f BB |
5383 | /* if kmem_flags are set, lets try to use less memory */ |
5384 | if (kmem_debugging()) | |
5385 | arc_c = arc_c / 2; | |
5386 | if (arc_c < arc_c_min) | |
5387 | arc_c = arc_c_min; | |
5388 | ||
5389 | arc_anon = &ARC_anon; | |
5390 | arc_mru = &ARC_mru; | |
5391 | arc_mru_ghost = &ARC_mru_ghost; | |
5392 | arc_mfu = &ARC_mfu; | |
5393 | arc_mfu_ghost = &ARC_mfu_ghost; | |
5394 | arc_l2c_only = &ARC_l2c_only; | |
5395 | arc_size = 0; | |
5396 | ||
ca0bf58d | 5397 | multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], |
b9541d6b | 5398 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5399 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5400 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5401 | multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA], | |
b9541d6b | 5402 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5403 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5404 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5405 | multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], | |
b9541d6b | 5406 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5407 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5408 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5409 | multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], | |
b9541d6b | 5410 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5411 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5412 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5413 | multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], | |
b9541d6b | 5414 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5415 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5416 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5417 | multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], | |
b9541d6b | 5418 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5419 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5420 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5421 | multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], | |
b9541d6b | 5422 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5423 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5424 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5425 | multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], | |
b9541d6b | 5426 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5427 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5428 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5429 | multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], | |
b9541d6b | 5430 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5431 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5432 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
5433 | multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], | |
b9541d6b | 5434 | sizeof (arc_buf_hdr_t), |
ca0bf58d PS |
5435 | offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), |
5436 | zfs_arc_num_sublists_per_state, arc_state_multilist_index_func); | |
34dc7c2f | 5437 | |
e0b0ca98 BB |
5438 | arc_anon->arcs_state = ARC_STATE_ANON; |
5439 | arc_mru->arcs_state = ARC_STATE_MRU; | |
5440 | arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST; | |
5441 | arc_mfu->arcs_state = ARC_STATE_MFU; | |
5442 | arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST; | |
5443 | arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY; | |
5444 | ||
36da08ef PS |
5445 | refcount_create(&arc_anon->arcs_size); |
5446 | refcount_create(&arc_mru->arcs_size); | |
5447 | refcount_create(&arc_mru_ghost->arcs_size); | |
5448 | refcount_create(&arc_mfu->arcs_size); | |
5449 | refcount_create(&arc_mfu_ghost->arcs_size); | |
5450 | refcount_create(&arc_l2c_only->arcs_size); | |
5451 | ||
34dc7c2f BB |
5452 | buf_init(); |
5453 | ||
ca0bf58d PS |
5454 | arc_reclaim_thread_exit = FALSE; |
5455 | arc_user_evicts_thread_exit = FALSE; | |
ab26409d BB |
5456 | list_create(&arc_prune_list, sizeof (arc_prune_t), |
5457 | offsetof(arc_prune_t, p_node)); | |
34dc7c2f | 5458 | arc_eviction_list = NULL; |
ab26409d | 5459 | mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
5460 | bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); |
5461 | ||
1229323d | 5462 | arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri, |
aa9af22c | 5463 | max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC); |
f6046738 | 5464 | |
34dc7c2f BB |
5465 | arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, |
5466 | sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); | |
5467 | ||
5468 | if (arc_ksp != NULL) { | |
5469 | arc_ksp->ks_data = &arc_stats; | |
13be560d | 5470 | arc_ksp->ks_update = arc_kstat_update; |
34dc7c2f BB |
5471 | kstat_install(arc_ksp); |
5472 | } | |
5473 | ||
ca67b33a | 5474 | (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, |
1229323d | 5475 | TS_RUN, defclsyspri); |
34dc7c2f | 5476 | |
ca0bf58d | 5477 | (void) thread_create(NULL, 0, arc_user_evicts_thread, NULL, 0, &p0, |
1229323d | 5478 | TS_RUN, defclsyspri); |
ca0bf58d | 5479 | |
34dc7c2f | 5480 | arc_dead = FALSE; |
b128c09f | 5481 | arc_warm = B_FALSE; |
34dc7c2f | 5482 | |
e8b96c60 MA |
5483 | /* |
5484 | * Calculate maximum amount of dirty data per pool. | |
5485 | * | |
5486 | * If it has been set by a module parameter, take that. | |
5487 | * Otherwise, use a percentage of physical memory defined by | |
5488 | * zfs_dirty_data_max_percent (default 10%) with a cap at | |
5489 | * zfs_dirty_data_max_max (default 25% of physical memory). | |
5490 | */ | |
5491 | if (zfs_dirty_data_max_max == 0) | |
5492 | zfs_dirty_data_max_max = physmem * PAGESIZE * | |
5493 | zfs_dirty_data_max_max_percent / 100; | |
5494 | ||
5495 | if (zfs_dirty_data_max == 0) { | |
5496 | zfs_dirty_data_max = physmem * PAGESIZE * | |
5497 | zfs_dirty_data_max_percent / 100; | |
5498 | zfs_dirty_data_max = MIN(zfs_dirty_data_max, | |
5499 | zfs_dirty_data_max_max); | |
5500 | } | |
34dc7c2f BB |
5501 | } |
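
/*
 * A worked example of the sizing above (illustrative arithmetic, not
 * from the original source): on a machine with allmem = 16 GiB and no
 * tunables set, arc_c_max = allmem / 2 = 8 GiB, arc_c starts at
 * arc_c_max, arc_p = arc_c / 2 = 4 GiB, and arc_meta_limit =
 * MAX((3 * 8 GiB) / 4, arc_meta_min) = 6 GiB.  arc_tuning_update() may
 * then override any of these from the zfs_arc_* module parameters.
 */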

void
arc_fini(void)
{
	arc_prune_t *p;

#ifdef _KERNEL
	spl_unregister_shrinker(&arc_shrinker);
#endif /* _KERNEL */

	mutex_enter(&arc_reclaim_lock);
	arc_reclaim_thread_exit = TRUE;
	/*
	 * The reclaim thread will set arc_reclaim_thread_exit back to
	 * FALSE when it is finished exiting; we're waiting for that.
	 */
	while (arc_reclaim_thread_exit) {
		cv_signal(&arc_reclaim_thread_cv);
		cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
	}
	mutex_exit(&arc_reclaim_lock);

	mutex_enter(&arc_user_evicts_lock);
	arc_user_evicts_thread_exit = TRUE;
	/*
	 * The user evicts thread will set arc_user_evicts_thread_exit
	 * to FALSE when it is finished exiting; we're waiting for that.
	 */
	while (arc_user_evicts_thread_exit) {
		cv_signal(&arc_user_evicts_cv);
		cv_wait(&arc_user_evicts_cv, &arc_user_evicts_lock);
	}
	mutex_exit(&arc_user_evicts_lock);

	/* Use TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, TRUE);

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	taskq_wait(arc_prune_taskq);
	taskq_destroy(arc_prune_taskq);

	mutex_enter(&arc_prune_mtx);
	while ((p = list_head(&arc_prune_list)) != NULL) {
		list_remove(&arc_prune_list, p);
		refcount_remove(&p->p_refcnt, &arc_prune_list);
		refcount_destroy(&p->p_refcnt);
		kmem_free(p, sizeof (*p));
	}
	mutex_exit(&arc_prune_mtx);

	list_destroy(&arc_prune_list);
	mutex_destroy(&arc_prune_mtx);
	mutex_destroy(&arc_reclaim_lock);
	cv_destroy(&arc_reclaim_thread_cv);
	cv_destroy(&arc_reclaim_waiters_cv);

	mutex_destroy(&arc_user_evicts_lock);
	cv_destroy(&arc_user_evicts_cv);

	refcount_destroy(&arc_anon->arcs_size);
	refcount_destroy(&arc_mru->arcs_size);
	refcount_destroy(&arc_mru_ghost->arcs_size);
	refcount_destroy(&arc_mfu->arcs_size);
	refcount_destroy(&arc_mfu_ghost->arcs_size);
	refcount_destroy(&arc_l2c_only->arcs_size);

	multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
	multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
	multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);

	buf_fini();

	ASSERT0(arc_loaned_bytes);
}
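
/*
 * A note on the shutdown handshake in arc_fini() above (a restatement
 * for clarity, not new behavior): each worker thread clears its
 * *_thread_exit flag just before it exits, so the signal-and-wait loops
 * keep nudging the threads until the flags read FALSE again.  Only then
 * is it safe to tear down the locks, condition variables and lists the
 * threads were using.
 */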

/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself acts as a buffer against ARC eviction.  If a compressible
 * buffer is found during scanning and selected for writing to an L2ARC
 * device, we temporarily boost scanning headroom during the next scan
 * cycle to make sure we adapt to compression effects (which might
 * significantly reduce the data volume we write to L2ARC).  The thread
 * that does this is l2arc_feed_thread(), illustrated below; example sizes
 * are included to provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         V
 *		  +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *	                     32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the
 * list heads for eligible buffers, greatly increasing its chance of finding
 * them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_nocompress	skip compressing buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
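
/*
 * Illustration (an addition, not part of the original comment): on
 * Linux builds these tunables are exposed as zfs module parameters, so
 * raising the feed rate at runtime might look something like:
 *
 *	# echo $((64*1024*1024)) > /sys/module/zfs/parameters/l2arc_write_max
 *	# echo $((64*1024*1024)) > /sys/module/zfs/parameters/l2arc_write_boost
 *
 * The exact parameter set varies by release; consult the
 * zfs-module-parameters(5) man page for the build in question.
 */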

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
	    HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
		return (B_FALSE);

	return (B_TRUE);
}
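
/*
 * Side note (an addition, not in the original): check 4 above is how the
 * per-dataset secondarycache property takes effect; buffers excluded by
 * that property never have ARC_FLAG_L2CACHE set on their headers, so
 * HDR_L2CACHE() fails here and the feed thread skips them.
 */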

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}
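
/*
 * A worked example, assuming the stock defaults of this era (both
 * L2ARC_WRITE_SIZE and l2arc_write_boost at 8 MB): while arc_warm is
 * still B_FALSE the feed thread may write 8 MB + 8 MB = 16 MB per
 * interval; once the ARC has warmed up and begun evicting, the target
 * drops back to 8 MB.
 */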

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}
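
/*
 * A worked example of the interval math, assuming hz = 1000 and the
 * usual defaults l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200: with
 * l2arc_feed_again set, if the last pass wanted 8 MB and wrote 6 MB
 * (more than half), the next wakeup lands ~200 ticks (200 ms) after
 * 'began'; had it written only 1 MB, the next wakeup would be a full
 * 1000 ticks (1 s) out.  The MAX(now, ...) clamp ensures we never
 * schedule a wakeup in the past.
 */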

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	int64_t bytes_dropped = 0;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = &dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	/*
	 * All writes completed, or an error was hit.
	 */
top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * in which the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  We must retry so we
			 * don't leave the ARC_FLAG_L2_WRITING bit set.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);

			/*
			 * We don't want to rescan the headers we've
			 * already marked as having been written out, so
			 * we reinsert the head node so we can pick up
			 * where we left off.
			 */
			list_remove(buflist, head);
			list_insert_after(buflist, hdr, head);

			mutex_exit(&dev->l2ad_mtx);

			/*
			 * We wait for the hash lock to become available
			 * to try and prevent busy waiting, and increase
			 * the chance we'll be able to acquire the lock
			 * the next time around.
			 */
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * We could not have been moved into the arc_l2c_only
		 * state while in-flight due to our ARC_FLAG_L2_WRITING
		 * bit being set.  Let's just ensure that's being enforced.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		/*
		 * We may have allocated a buffer for L2ARC compression;
		 * we must release it to avoid leaking this data.
		 */
		l2arc_release_cdata_buf(hdr);

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, hdr);
			hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;

			ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);

			bytes_dropped += hdr->b_l2hdr.b_asize;
			(void) refcount_remove_many(&dev->l2ad_alloc,
			    hdr->b_l2hdr.b_asize, hdr);
		}

		/*
		 * Allow ARC to begin reads and ghost list evictions to
		 * this L2ARC entry.
		 */
		hdr->b_flags &= ~ARC_FLAG_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	ASSERT(!HDR_HAS_L1HDR(head));
	kmem_cache_free(hdr_l2only_cache, head);
	mutex_exit(&dev->l2ad_mtx);

	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

	/*
	 * If the buffer was compressed, decompress it first.
	 */
	if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
		l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
	ASSERT(zio->io_data != NULL);

	/*
	 * Check this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = SET_ERROR(EIO);
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL) {
			zio_t *pio = zio_unique_parent(zio);

			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
		}
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked sublist; the caller is
 * responsible for unlocking it.
 */
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
	multilist_t *ml = NULL;
	unsigned int idx;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		break;
	case 1:
		ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		break;
	case 2:
		ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		break;
	case 3:
		ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
		break;
	}

	/*
	 * Return a randomly-selected sublist.  This is acceptable
	 * because the caller feeds only a little bit of data for each
	 * call (8MB).  Subsequent calls will result in different
	 * sublists being selected.
	 */
	idx = multilist_get_random_index(ml);
	return (multilist_sublist_lock(ml, idx));
}
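
/*
 * Illustrative note (an assumption about a typical configuration, not
 * from the original source): with zfs_arc_num_sublists_per_state
 * defaulted to the CPU count in arc_init(), each feed pass locks one
 * randomly chosen sublist per list, so successive ~8MB feeds tend to
 * land on different sublists and spread lock contention across CPUs.
 */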

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, or it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	arc_buf_hdr_t *hdr, *hdr_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = &dev->l2ad_buflist;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * in which the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(hdr)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, hdr);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && HDR_HAS_L2HDR(hdr) &&
		    (hdr->b_l2hdr.b_daddr > taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		ASSERT(HDR_HAS_L2HDR(hdr));
		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				hdr->b_flags |= ARC_FLAG_L2_EVICTED;
			}

			/* Ensure this header has finished being written */
			ASSERT(!HDR_L2_WRITING(hdr));
			ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);
}
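
/*
 * A worked example of the target address selection above (illustrative
 * numbers only): with the write hand 1 GB into a 32 GB device and an
 * 8 MB distance, the region [hand, hand + 8 MB) is cleared.  If instead
 * the hand were within 16 MB (2 * distance) of l2ad_end, we would evict
 * all the way to l2ad_end, since the hand is about to wrap back to
 * l2ad_start.
 */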

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 * The headroom_boost is an in-out parameter used to maintain headroom boost
 * state between calls to this function.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
    boolean_t *headroom_boost)
{
	arc_buf_hdr_t *hdr, *hdr_prev, *head;
	uint64_t write_asize, write_sz, headroom, buf_compress_minsz,
	    stats_size;
	void *buf_data;
	boolean_t full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	uint64_t guid = spa_load_guid(spa);
	int try;
	const boolean_t do_headroom_boost = *headroom_boost;

	ASSERT(dev->l2ad_vdev != NULL);

	/* Lower the flag now; we might want to raise it again later. */
	*headroom_boost = B_FALSE;

	pio = NULL;
	write_sz = write_asize = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
	head->b_flags |= ARC_FLAG_HAS_L2HDR;

	/*
	 * We will want to try to compress buffers that are at least 2x the
	 * device sector size.
	 */
	buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	for (try = 0; try <= 3; try++) {
		multilist_sublist_t *mls = l2arc_sublist_lock(try);
		uint64_t passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		if (arc_warm == B_FALSE)
			hdr = multilist_sublist_head(mls);
		else
			hdr = multilist_sublist_tail(mls);

		headroom = target_sz * l2arc_headroom;
		if (do_headroom_boost)
			headroom = (headroom * l2arc_headroom_boost) / 100;

		for (; hdr; hdr = hdr_prev) {
			kmutex_t *hash_lock;
			uint64_t buf_sz;
			uint64_t buf_a_sz;

			if (arc_warm == B_FALSE)
				hdr_prev = multilist_sublist_next(mls, hdr);
			else
				hdr_prev = multilist_sublist_prev(mls, hdr);

			hash_lock = HDR_LOCK(hdr);
			if (!mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += hdr->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (!l2arc_write_eligible(guid, hdr)) {
				mutex_exit(hash_lock);
				continue;
			}

			/*
			 * Assume that the buffer is not going to be
			 * compressed and could take more space on disk
			 * because of a larger disk block size.
			 */
			buf_sz = hdr->b_size;
			buf_a_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			if ((write_asize + buf_a_sz) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				mutex_enter(&dev->l2ad_mtx);
				list_insert_head(&dev->l2ad_buflist, head);
				mutex_exit(&dev->l2ad_mtx);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdr->b_l2hdr.b_dev = dev;
			hdr->b_flags |= ARC_FLAG_L2_WRITING;
			/*
			 * Temporarily stash the data buffer in b_tmp_cdata.
			 * The subsequent write step will pick it up from
			 * there.  This is because we can't access
			 * b_l1hdr.b_buf without holding the hash_lock, which
			 * we in turn can't access without holding the ARC
			 * list locks (which we want to avoid during
			 * compression/writing).
			 */
			HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
			hdr->b_l2hdr.b_asize = hdr->b_size;
			hdr->b_l2hdr.b_hits = 0;
			hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;

			/*
			 * Explicitly set the b_daddr field to a known
			 * value which means "invalid address".  This
			 * enables us to differentiate which stage of
			 * l2arc_write_buffers() the particular header
			 * is in (e.g. this loop, or the one below).
			 * ARC_FLAG_L2_WRITING is not enough to make
			 * this distinction, and we need to know in
			 * order to do proper l2arc vdev accounting in
			 * arc_release() and arc_hdr_destroy().
			 *
			 * Note, we can't use a new flag to distinguish
			 * the two stages because we don't hold the
			 * header's hash_lock below, in the second stage
			 * of this function.  Thus, we can't simply
			 * change the b_flags field to denote that the
			 * IO has been sent.  We can change the b_daddr
			 * field of the L2 portion, though, since we'll
			 * be holding the l2ad_mtx; which is why we're
			 * using it to denote the header's state change.
			 */
			hdr->b_l2hdr.b_daddr = L2ARC_ADDR_UNSET;
			hdr->b_flags |= ARC_FLAG_HAS_L2HDR;

			mutex_enter(&dev->l2ad_mtx);
			list_insert_head(&dev->l2ad_buflist, hdr);
			mutex_exit(&dev->l2ad_mtx);

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(hdr->b_l1hdr.b_buf);
			arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE);

			mutex_exit(hash_lock);

			write_sz += buf_sz;
			write_asize += buf_a_sz;
		}

		multilist_sublist_unlock(mls);

		if (full == B_TRUE)
			break;
	}

	/* No buffers selected for writing? */
	if (pio == NULL) {
		ASSERT0(write_sz);
		ASSERT(!HDR_HAS_L1HDR(head));
		kmem_cache_free(hdr_l2only_cache, head);
		return (0);
	}

	mutex_enter(&dev->l2ad_mtx);

	/*
	 * Note that elsewhere in this file arcstat_l2_asize
	 * and the used space on l2ad_vdev are updated using b_asize,
	 * which is not necessarily rounded up to the device block size.
	 * To keep accounting consistent we do the same here as well:
	 * stats_size accumulates the sum of b_asize of the written buffers,
	 * while write_asize accumulates the sum of b_asize rounded up
	 * to the device block size.
	 * The latter sum is used only to validate the correctness of the code.
	 */
	stats_size = 0;
	write_asize = 0;

	/*
	 * Now start writing the buffers.  We're starting at the write head
	 * and work backwards, retracing the course of the buffer selector
	 * loop above.
	 */
	for (hdr = list_prev(&dev->l2ad_buflist, head); hdr;
	    hdr = list_prev(&dev->l2ad_buflist, hdr)) {
		uint64_t buf_sz;

		/*
		 * We rely on the L1 portion of the header below, so
		 * it's invalid for this header to have been evicted out
		 * of the ghost cache, prior to being written out.  The
		 * ARC_FLAG_L2_WRITING bit ensures this won't happen.
		 */
		ASSERT(HDR_HAS_L1HDR(hdr));

		/*
		 * We shouldn't need to lock the buffer here, since we flagged
		 * it as ARC_FLAG_L2_WRITING in the previous step, but we must
		 * take care to only access its L2 cache parameters.  In
		 * particular, hdr->l1hdr.b_buf may be invalid by now due to
		 * ARC eviction.
		 */
		hdr->b_l2hdr.b_daddr = dev->l2ad_hand;

		if ((!l2arc_nocompress && HDR_L2COMPRESS(hdr)) &&
		    hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
			if (l2arc_compress_buf(hdr)) {
				/*
				 * If compression succeeded, enable headroom
				 * boost on the next scan cycle.
				 */
				*headroom_boost = B_TRUE;
			}
		}

		/*
		 * Pick up the buffer data we had previously stashed away
		 * (and now potentially also compressed).
		 */
		buf_data = hdr->b_l1hdr.b_tmp_cdata;
		buf_sz = hdr->b_l2hdr.b_asize;

		/*
		 * We need to do this regardless of whether buf_sz is zero;
		 * otherwise, when this l2hdr is evicted we'll remove a
		 * reference that was never added.
		 */
		(void) refcount_add_many(&dev->l2ad_alloc, buf_sz, hdr);

		/* Compression may have squashed the buffer to zero length. */
		if (buf_sz != 0) {
			uint64_t buf_a_sz;

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			stats_size += buf_sz;

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_a_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
			write_asize += buf_a_sz;
			dev->l2ad_hand += buf_a_sz;
		}
	}

	mutex_exit(&dev->l2ad_mtx);

	ASSERT3U(write_asize, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	ARCSTAT_INCR(arcstat_l2_asize, stats_size);
	vdev_space_update(dev->l2ad_vdev, stats_size, 0, 0);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	dev->l2ad_writing = B_TRUE;
	(void) zio_wait(pio);
	dev->l2ad_writing = B_FALSE;

	return (write_asize);
}
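
/*
 * A worked example of the two accounting sums above (illustrative,
 * assuming a vdev with ashift = 12, i.e. 4 KB allocation blocks): a
 * buffer whose compressed b_asize is 5000 bytes adds 5000 to stats_size
 * (and hence to arcstat_l2_asize and the vdev space accounting), while
 * vdev_psize_to_asize() rounds it up to 8192 for write_asize and for
 * advancing l2ad_hand, keeping the write hand device-aligned.
 */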

/*
 * Compresses an L2ARC buffer.
 * The data to be compressed must be prefilled in l1hdr.b_tmp_cdata and its
 * size in l2hdr->b_asize.  This routine tries to compress the data and
 * depending on the compression result there are three possible outcomes:
 * *) The buffer was incompressible.  The original l2hdr contents were left
 *    untouched and are ready for writing to an L2 device.
 * *) The buffer was all-zeros, so there is no need to write it to an L2
 *    device.  To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
 *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
 *    data buffer which holds the compressed data to be written, and b_asize
 *    tells us how much data there is.  b_compress is set to the appropriate
 *    compression algorithm.  Once writing is done, invoke
 *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
 *
 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
 * buffer was incompressible).
 */
static boolean_t
l2arc_compress_buf(arc_buf_hdr_t *hdr)
{
	void *cdata;
	size_t csize, len, rounded;
	l2arc_buf_hdr_t *l2hdr;

	ASSERT(HDR_HAS_L2HDR(hdr));

	l2hdr = &hdr->b_l2hdr;

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF);
	ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);

	len = l2hdr->b_asize;
	cdata = zio_data_buf_alloc(len);
	ASSERT3P(cdata, !=, NULL);
	csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
	    cdata, l2hdr->b_asize);

	rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
	if (rounded > csize) {
		bzero((char *)cdata + csize, rounded - csize);
		csize = rounded;
	}

	if (csize == 0) {
		/* zero block, indicate that there's nothing to write */
		zio_data_buf_free(cdata, len);
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_EMPTY);
		l2hdr->b_asize = 0;
		hdr->b_l1hdr.b_tmp_cdata = NULL;
		ARCSTAT_BUMP(arcstat_l2_compress_zeros);
		return (B_TRUE);
	} else if (csize > 0 && csize < len) {
		/*
		 * Compression succeeded, we'll keep the cdata around for
		 * writing and release it afterwards.
		 */
		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
		l2hdr->b_asize = csize;
		hdr->b_l1hdr.b_tmp_cdata = cdata;
		ARCSTAT_BUMP(arcstat_l2_compress_successes);
		return (B_TRUE);
	} else {
		/*
		 * Compression failed, release the compressed buffer.
		 * l2hdr will be left unmodified.
		 */
		zio_data_buf_free(cdata, len);
		ARCSTAT_BUMP(arcstat_l2_compress_failures);
		return (B_FALSE);
	}
}
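
/*
 * A worked example of the rounding step above (pure arithmetic, not
 * from the original source): a 128 KB buffer that LZ4 squeezes to
 * 45000 bytes is padded out to P2ROUNDUP(45000, 512) = 45056 bytes
 * (SPA_MINBLOCKSIZE being 512), which is still well under len, so the
 * header takes the ZIO_COMPRESS_LZ4 path with b_asize = 45056.  Had
 * LZ4 produced >= 128 KB, the cdata buffer would be freed and the
 * buffer left to be written uncompressed.
 */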

/*
 * Decompresses a zio read back from an l2arc device.  On success, the
 * underlying zio's io_data buffer is overwritten by the uncompressed
 * version.  On decompression error (corrupt compressed stream), the
 * zio->io_error value is set to signal an I/O error.
 *
 * Please note that the compressed data stream is not checksummed, so
 * if the underlying device is experiencing data corruption, we may feed
 * corrupt data to the decompressor, so the decompressor needs to be
 * able to handle this situation (LZ4 does).
 */
static void
l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
{
	uint64_t csize;
	void *cdata;

	ASSERT(L2ARC_IS_VALID_COMPRESS(c));

	if (zio->io_error != 0) {
		/*
		 * An I/O error has occurred; just restore the original
		 * I/O size in preparation for a main pool read.
		 */
		zio->io_orig_size = zio->io_size = hdr->b_size;
		return;
	}

	if (c == ZIO_COMPRESS_EMPTY) {
		/*
		 * An empty buffer results in a null zio, which means we
		 * need to fill its io_data after we're done restoring the
		 * buffer's contents.
		 */
		ASSERT(hdr->b_l1hdr.b_buf != NULL);
		bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size);
		zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data;
	} else {
		ASSERT(zio->io_data != NULL);
		/*
		 * We copy the compressed data from the start of the arc buffer
		 * (the zio_read will have pulled in only what we need, the
		 * rest is garbage which we will overwrite at decompression)
		 * and then decompress back to the ARC data buffer.  This way
		 * we can minimize copying by simply decompressing back over
		 * the original compressed data (rather than decompressing to
		 * an aux buffer and then copying back the uncompressed buffer,
		 * which is likely to be much larger).
		 */
		csize = zio->io_size;
		cdata = zio_data_buf_alloc(csize);
		bcopy(zio->io_data, cdata, csize);
		if (zio_decompress_data(c, cdata, zio->io_data, csize,
		    hdr->b_size) != 0)
			zio->io_error = EIO;
		zio_data_buf_free(cdata, csize);
	}

	/* Restore the expected uncompressed IO size. */
	zio->io_orig_size = zio->io_size = hdr->b_size;
}

/*
 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
 * This buffer serves as a temporary holder of compressed data while
 * the buffer entry is being written to an l2arc device.  Once that is
 * done, we can dispose of it.
 */
static void
l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
{
	enum zio_compress comp = HDR_GET_COMPRESS(hdr);

	ASSERT(HDR_HAS_L1HDR(hdr));
	ASSERT(comp == ZIO_COMPRESS_OFF || L2ARC_IS_VALID_COMPRESS(comp));

	if (comp == ZIO_COMPRESS_OFF) {
		/*
		 * In this case, b_tmp_cdata points to the same buffer
		 * as the arc_buf_t's b_data field.  We don't want to
		 * free it, since the arc_buf_t will handle that.
		 */
		hdr->b_l1hdr.b_tmp_cdata = NULL;
	} else if (comp == ZIO_COMPRESS_EMPTY) {
		/*
		 * In this case, b_tmp_cdata was compressed to an empty
		 * buffer, thus there's nothing to free and b_tmp_cdata
		 * should have been set to NULL in l2arc_write_buffers().
		 */
		ASSERT3P(hdr->b_l1hdr.b_tmp_cdata, ==, NULL);
	} else {
		/*
		 * If the data was compressed, then we've allocated a
		 * temporary buffer for it, so now we need to release it.
		 */
		ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
		zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
		    hdr->b_size);
		hdr->b_l1hdr.b_tmp_cdata = NULL;
	}
}
6736 | ||
6737 | /* | |
6738 | * This thread feeds the L2ARC at regular intervals. This is the beating | |
6739 | * heart of the L2ARC. | |
6740 | */ | |
6741 | static void | |
6742 | l2arc_feed_thread(void) | |
6743 | { | |
6744 | callb_cpr_t cpr; | |
6745 | l2arc_dev_t *dev; | |
6746 | spa_t *spa; | |
d164b209 | 6747 | uint64_t size, wrote; |
428870ff | 6748 | clock_t begin, next = ddi_get_lbolt(); |
3a17a7a9 | 6749 | boolean_t headroom_boost = B_FALSE; |
40d06e3c | 6750 | fstrans_cookie_t cookie; |
34dc7c2f BB |
6751 | |
6752 | CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); | |
6753 | ||
6754 | mutex_enter(&l2arc_feed_thr_lock); | |
6755 | ||
40d06e3c | 6756 | cookie = spl_fstrans_mark(); |
34dc7c2f | 6757 | while (l2arc_thread_exit == 0) { |
34dc7c2f | 6758 | CALLB_CPR_SAFE_BEGIN(&cpr); |
b64ccd6c | 6759 | (void) cv_timedwait_sig(&l2arc_feed_thr_cv, |
5b63b3eb | 6760 | &l2arc_feed_thr_lock, next); |
34dc7c2f | 6761 | CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); |
428870ff | 6762 | next = ddi_get_lbolt() + hz; |
34dc7c2f BB |
6763 | |
6764 | /* | |
b128c09f | 6765 | * Quick check for L2ARC devices. |
34dc7c2f BB |
6766 | */ |
6767 | mutex_enter(&l2arc_dev_mtx); | |
6768 | if (l2arc_ndev == 0) { | |
6769 | mutex_exit(&l2arc_dev_mtx); | |
6770 | continue; | |
6771 | } | |
b128c09f | 6772 | mutex_exit(&l2arc_dev_mtx); |
428870ff | 6773 | begin = ddi_get_lbolt(); |
34dc7c2f BB |
6774 | |
6775 | /* | |
b128c09f BB |
6776 | * This selects the next l2arc device to write to, and in |
6777 | * doing so the next spa to feed from: dev->l2ad_spa. This | |
6778 | * will return NULL if there are now no l2arc devices or if | |
6779 | * they are all faulted. | |
6780 | * | |
6781 | * If a device is returned, its spa's config lock is also | |
6782 | * held to prevent device removal. l2arc_dev_get_next() | |
6783 | * will grab and release l2arc_dev_mtx. | |
34dc7c2f | 6784 | */ |
b128c09f | 6785 | if ((dev = l2arc_dev_get_next()) == NULL) |
34dc7c2f | 6786 | continue; |
b128c09f BB |
6787 | |
6788 | spa = dev->l2ad_spa; | |
6789 | ASSERT(spa != NULL); | |
34dc7c2f | 6790 | |
572e2857 BB |
6791 | /* |
6792 | * If the pool is read-only then force the feed thread to | |
6793 | * sleep a little longer. | |
6794 | */ | |
6795 | if (!spa_writeable(spa)) { | |
6796 | next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; | |
6797 | spa_config_exit(spa, SCL_L2ARC, dev); | |
6798 | continue; | |
6799 | } | |
6800 | ||
34dc7c2f | 6801 | /* |
b128c09f | 6802 | * Avoid contributing to memory pressure. |
34dc7c2f | 6803 | */ |
ca67b33a | 6804 | if (arc_reclaim_needed()) { |
b128c09f BB |
6805 | ARCSTAT_BUMP(arcstat_l2_abort_lowmem); |
6806 | spa_config_exit(spa, SCL_L2ARC, dev); | |
34dc7c2f BB |
6807 | continue; |
6808 | } | |
b128c09f | 6809 | |
34dc7c2f BB |
6810 | ARCSTAT_BUMP(arcstat_l2_feeds); |
6811 | ||
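/*
 * Determine the target write size for this interval. Until the
 * ARC is warm, this is boosted by l2arc_write_boost.
 */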
3a17a7a9 | 6812 | size = l2arc_write_size(); |
b128c09f | 6813 | |
34dc7c2f BB |
6814 | /* |
6815 | * Evict L2ARC buffers that will be overwritten. | |
6816 | */ | |
b128c09f | 6817 | l2arc_evict(dev, size, B_FALSE); |
34dc7c2f BB |
6818 | |
6819 | /* | |
6820 | * Write ARC buffers. | |
6821 | */ | |
3a17a7a9 | 6822 | wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost); |
d164b209 BB |
6823 | |
6824 | /* | |
6825 | * Calculate interval between writes. | |
6826 | */ | |
6827 | next = l2arc_write_interval(begin, size, wrote); | |
b128c09f | 6828 | spa_config_exit(spa, SCL_L2ARC, dev); |
34dc7c2f | 6829 | } |
40d06e3c | 6830 | spl_fstrans_unmark(cookie); |
34dc7c2f BB |
6831 | |
6832 | l2arc_thread_exit = 0; | |
6833 | cv_broadcast(&l2arc_feed_thr_cv); | |
6834 | CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ | |
6835 | thread_exit(); | |
6836 | } | |
6837 | ||
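/*
 * Check whether the given vdev is currently registered as an
 * L2ARC cache device.
 */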
b128c09f BB |
6838 | boolean_t |
6839 | l2arc_vdev_present(vdev_t *vd) | |
6840 | { | |
6841 | l2arc_dev_t *dev; | |
6842 | ||
6843 | mutex_enter(&l2arc_dev_mtx); | |
6844 | for (dev = list_head(l2arc_dev_list); dev != NULL; | |
6845 | dev = list_next(l2arc_dev_list, dev)) { | |
6846 | if (dev->l2ad_vdev == vd) | |
6847 | break; | |
6848 | } | |
6849 | mutex_exit(&l2arc_dev_mtx); | |
6850 | ||
6851 | return (dev != NULL); | |
6852 | } | |
6853 | ||
34dc7c2f BB |
6854 | /* |
6855 | * Add a vdev for use by the L2ARC. By this point the spa has already | |
6856 | * validated the vdev and opened it. | |
6857 | */ | |
6858 | void | |
9babb374 | 6859 | l2arc_add_vdev(spa_t *spa, vdev_t *vd) |
34dc7c2f BB |
6860 | { |
6861 | l2arc_dev_t *adddev; | |
6862 | ||
b128c09f BB |
6863 | ASSERT(!l2arc_vdev_present(vd)); |
6864 | ||
34dc7c2f BB |
6865 | /* |
6866 | * Create a new l2arc device entry. | |
6867 | */ | |
6868 | adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); | |
6869 | adddev->l2ad_spa = spa; | |
6870 | adddev->l2ad_vdev = vd; | |
9babb374 BB |
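/*
 * The usable region begins past the vdev labels; the write hand
 * starts at l2ad_start and wraps within [l2ad_start, l2ad_end).
 */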
6871 | adddev->l2ad_start = VDEV_LABEL_START_SIZE; |
6872 | adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); | |
34dc7c2f | 6873 | adddev->l2ad_hand = adddev->l2ad_start; |
34dc7c2f | 6874 | adddev->l2ad_first = B_TRUE; |
d164b209 | 6875 | adddev->l2ad_writing = B_FALSE; |
98f72a53 | 6876 | list_link_init(&adddev->l2ad_node); |
34dc7c2f | 6877 | |
b9541d6b | 6878 | mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
6879 | /* |
6880 | * This is a list of all ARC buffers that are still valid on the | |
6881 | * device. | |
6882 | */ | |
b9541d6b CW |
6883 | list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), |
6884 | offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node)); | |
34dc7c2f | 6885 | |
428870ff | 6886 | vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); |
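/*
 * l2ad_alloc tracks the bytes of ARC buffer data currently
 * allocated on this device.
 */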
d962d5da | 6887 | refcount_create(&adddev->l2ad_alloc); |
34dc7c2f BB |
6888 | |
6889 | /* | |
6890 | * Add device to global list | |
6891 | */ | |
6892 | mutex_enter(&l2arc_dev_mtx); | |
6893 | list_insert_head(l2arc_dev_list, adddev); | |
6894 | atomic_inc_64(&l2arc_ndev); | |
6895 | mutex_exit(&l2arc_dev_mtx); | |
6896 | } | |
6897 | ||
6898 | /* | |
6899 | * Remove a vdev from the L2ARC. | |
6900 | */ | |
6901 | void | |
6902 | l2arc_remove_vdev(vdev_t *vd) | |
6903 | { | |
6904 | l2arc_dev_t *dev, *nextdev, *remdev = NULL; | |
6905 | ||
34dc7c2f BB |
6906 | /* |
6907 | * Find the device by vdev | |
6908 | */ | |
6909 | mutex_enter(&l2arc_dev_mtx); | |
6910 | for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { | |
6911 | nextdev = list_next(l2arc_dev_list, dev); | |
6912 | if (vd == dev->l2ad_vdev) { | |
6913 | remdev = dev; | |
6914 | break; | |
6915 | } | |
6916 | } | |
6917 | ASSERT(remdev != NULL); | |
6918 | ||
6919 | /* | |
6920 | * Remove device from global list | |
6921 | */ | |
6922 | list_remove(l2arc_dev_list, remdev); | |
6923 | l2arc_dev_last = NULL; /* may have been invalidated */ | |
b128c09f BB |
6924 | atomic_dec_64(&l2arc_ndev); |
6925 | mutex_exit(&l2arc_dev_mtx); | |
34dc7c2f BB |
6926 | |
6927 | /* | |
6928 | * Clear all buflists and ARC references; this flushes the L2ARC device. | |
6929 | */ | |
6930 | l2arc_evict(remdev, 0, B_TRUE); | |
b9541d6b CW |
6931 | list_destroy(&remdev->l2ad_buflist); |
6932 | mutex_destroy(&remdev->l2ad_mtx); | |
d962d5da | 6933 | refcount_destroy(&remdev->l2ad_alloc); |
34dc7c2f | 6934 | kmem_free(remdev, sizeof (l2arc_dev_t)); |
34dc7c2f BB |
6935 | } |
6936 | ||
6937 | void | |
b128c09f | 6938 | l2arc_init(void) |
34dc7c2f BB |
6939 | { |
6940 | l2arc_thread_exit = 0; | |
6941 | l2arc_ndev = 0; | |
6942 | l2arc_writes_sent = 0; | |
6943 | l2arc_writes_done = 0; | |
6944 | ||
6945 | mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
6946 | cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); | |
6947 | mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); | |
34dc7c2f BB |
6948 | mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); |
6949 | ||
6950 | l2arc_dev_list = &L2ARC_dev_list; | |
6951 | l2arc_free_on_write = &L2ARC_free_on_write; | |
6952 | list_create(l2arc_dev_list, sizeof (l2arc_dev_t), | |
6953 | offsetof(l2arc_dev_t, l2ad_node)); | |
6954 | list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), | |
6955 | offsetof(l2arc_data_free_t, l2df_list_node)); | |
34dc7c2f BB |
6956 | } |
6957 | ||
6958 | void | |
b128c09f | 6959 | l2arc_fini(void) |
34dc7c2f | 6960 | { |
b128c09f BB |
6961 | /* |
6962 | * This is called from dmu_fini(), which is called from spa_fini(). | |
6963 | * Because of this, we can assume that all l2arc devices have | |
6964 | * already been removed when the pools themselves were removed. | |
6965 | */ | |
6966 | ||
6967 | l2arc_do_free_on_write(); | |
34dc7c2f BB |
6968 | |
6969 | mutex_destroy(&l2arc_feed_thr_lock); | |
6970 | cv_destroy(&l2arc_feed_thr_cv); | |
6971 | mutex_destroy(&l2arc_dev_mtx); | |
34dc7c2f BB |
6972 | mutex_destroy(&l2arc_free_on_write_mtx); |
6973 | ||
6974 | list_destroy(l2arc_dev_list); | |
6975 | list_destroy(l2arc_free_on_write); | |
6976 | } | |
b128c09f BB |
6977 | |
6978 | void | |
6979 | l2arc_start(void) | |
6980 | { | |
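/* The feed thread is only needed when the pools are writeable. */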
fb5f0bc8 | 6981 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
6982 | return; |
6983 | ||
6984 | (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, | |
1229323d | 6985 | TS_RUN, defclsyspri); |
b128c09f BB |
6986 | } |
6987 | ||
6988 | void | |
6989 | l2arc_stop(void) | |
6990 | { | |
fb5f0bc8 | 6991 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
6992 | return; |
6993 | ||
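/*
 * Signal the feed thread to exit, then wait for it to acknowledge
 * the request by resetting l2arc_thread_exit.
 */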
6994 | mutex_enter(&l2arc_feed_thr_lock); | |
6995 | cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ | |
6996 | l2arc_thread_exit = 1; | |
6997 | while (l2arc_thread_exit != 0) | |
6998 | cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); | |
6999 | mutex_exit(&l2arc_feed_thr_lock); | |
7000 | } | |
c28b2279 BB |
7001 | |
7002 | #if defined(_KERNEL) && defined(HAVE_SPL) | |
0f699108 AZ |
7003 | EXPORT_SYMBOL(arc_buf_size); |
7004 | EXPORT_SYMBOL(arc_write); | |
c28b2279 BB |
7005 | EXPORT_SYMBOL(arc_read); |
7006 | EXPORT_SYMBOL(arc_buf_remove_ref); | |
e0b0ca98 | 7007 | EXPORT_SYMBOL(arc_buf_info); |
c28b2279 | 7008 | EXPORT_SYMBOL(arc_getbuf_func); |
ab26409d BB |
7009 | EXPORT_SYMBOL(arc_add_prune_callback); |
7010 | EXPORT_SYMBOL(arc_remove_prune_callback); | |
c28b2279 | 7011 | |
bce45ec9 | 7012 | module_param(zfs_arc_min, ulong, 0644); |
c409e464 | 7013 | MODULE_PARM_DESC(zfs_arc_min, "Minimum ARC size in bytes");
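/*
 * The 0644 mode on these module parameters makes them writable at
 * runtime through sysfs. For example (a hypothetical value, not a
 * recommendation):
 *
 *   echo 1073741824 > /sys/module/zfs/parameters/zfs_arc_min
 */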
c28b2279 | 7014 | |
bce45ec9 | 7015 | module_param(zfs_arc_max, ulong, 0644); |
c409e464 | 7016 | MODULE_PARM_DESC(zfs_arc_max, "Maximum ARC size in bytes");
c28b2279 | 7017 | |
bce45ec9 | 7018 | module_param(zfs_arc_meta_limit, ulong, 0644); |
c28b2279 | 7019 | MODULE_PARM_DESC(zfs_arc_meta_limit, "Maximum ARC metadata size in bytes");
6a8f9b6b | 7020 | |
ca0bf58d PS |
7021 | module_param(zfs_arc_meta_min, ulong, 0644); |
7022 | MODULE_PARM_DESC(zfs_arc_meta_min, "Minimum ARC metadata size in bytes");
7023 | ||
bce45ec9 | 7024 | module_param(zfs_arc_meta_prune, int, 0644); |
2cbb06b5 | 7025 | MODULE_PARM_DESC(zfs_arc_meta_prune, "Number of metadata objects to scan when pruning");
c409e464 | 7026 | |
ca67b33a | 7027 | module_param(zfs_arc_meta_adjust_restarts, int, 0644); |
bc888666 BB |
7028 | MODULE_PARM_DESC(zfs_arc_meta_adjust_restarts, |
7029 | "Limit number of restarts in arc_adjust_meta"); | |
7030 | ||
f6046738 BB |
7031 | module_param(zfs_arc_meta_strategy, int, 0644); |
7032 | MODULE_PARM_DESC(zfs_arc_meta_strategy, "Meta reclaim strategy"); | |
7033 | ||
bce45ec9 | 7034 | module_param(zfs_arc_grow_retry, int, 0644); |
c409e464 BB |
7035 | MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size"); |
7036 | ||
89c8cac4 PS |
7037 | module_param(zfs_arc_p_aggressive_disable, int, 0644); |
7038 | MODULE_PARM_DESC(zfs_arc_p_aggressive_disable, "Disable aggressive arc_p growth");
7039 | ||
62422785 PS |
7040 | module_param(zfs_arc_p_dampener_disable, int, 0644); |
7041 | MODULE_PARM_DESC(zfs_arc_p_dampener_disable, "Disable arc_p adapt dampener");
7042 | ||
bce45ec9 | 7043 | module_param(zfs_arc_shrink_shift, int, 0644); |
c409e464 BB |
7044 | MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)"); |
7045 | ||
728d6ae9 BB |
7046 | module_param(zfs_arc_p_min_shift, int, 0644); |
7047 | MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift used to calculate min/max arc_p");
7048 | ||
1f7c30df BB |
7049 | module_param(zfs_disable_dup_eviction, int, 0644); |
7050 | MODULE_PARM_DESC(zfs_disable_dup_eviction, "Disable duplicate buffer eviction");
7051 | ||
49ddb315 MA |
7052 | module_param(zfs_arc_average_blocksize, int, 0444); |
7053 | MODULE_PARM_DESC(zfs_arc_average_blocksize, "Target average block size"); | |
7054 | ||
bce45ec9 BB |
7055 | module_param(zfs_arc_min_prefetch_lifespan, int, 0644); |
7056 | MODULE_PARM_DESC(zfs_arc_min_prefetch_lifespan, "Min life of prefetch block"); | |
7057 | ||
ca0bf58d PS |
7058 | module_param(zfs_arc_num_sublists_per_state, int, 0644); |
7059 | MODULE_PARM_DESC(zfs_arc_num_sublists_per_state, | |
7060 | "Number of sublists used in each of the ARC state lists"); | |
7061 | ||
bce45ec9 | 7062 | module_param(l2arc_write_max, ulong, 0644); |
abd8610c BB |
7063 | MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval"); |
7064 | ||
bce45ec9 | 7065 | module_param(l2arc_write_boost, ulong, 0644); |
abd8610c BB |
7066 | MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup"); |
7067 | ||
bce45ec9 | 7068 | module_param(l2arc_headroom, ulong, 0644); |
abd8610c BB |
7069 | MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes' worth of buffers to scan ahead");
7070 | ||
3a17a7a9 SK |
7071 | module_param(l2arc_headroom_boost, ulong, 0644); |
7072 | MODULE_PARM_DESC(l2arc_headroom_boost, "Compressed l2arc_headroom multiplier"); | |
7073 | ||
bce45ec9 | 7074 | module_param(l2arc_feed_secs, ulong, 0644); |
abd8610c BB |
7075 | MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing"); |
7076 | ||
bce45ec9 | 7077 | module_param(l2arc_feed_min_ms, ulong, 0644); |
abd8610c BB |
7078 | MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds"); |
7079 | ||
bce45ec9 | 7080 | module_param(l2arc_noprefetch, int, 0644); |
abd8610c BB |
7081 | MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers"); |
7082 | ||
3a17a7a9 SK |
7083 | module_param(l2arc_nocompress, int, 0644); |
7084 | MODULE_PARM_DESC(l2arc_nocompress, "Skip compressing L2ARC buffers"); | |
7085 | ||
bce45ec9 | 7086 | module_param(l2arc_feed_again, int, 0644); |
abd8610c BB |
7087 | MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup"); |
7088 | ||
bce45ec9 | 7089 | module_param(l2arc_norw, int, 0644); |
abd8610c BB |
7090 | MODULE_PARM_DESC(l2arc_norw, "No reads during writes"); |
7091 | ||
7e8bddd0 BB |
7092 | module_param(zfs_arc_lotsfree_percent, int, 0644); |
7093 | MODULE_PARM_DESC(zfs_arc_lotsfree_percent, | |
7094 | "System free memory I/O throttle in bytes"); | |
7095 | ||
11f552fa BB |
7096 | module_param(zfs_arc_sys_free, ulong, 0644); |
7097 | MODULE_PARM_DESC(zfs_arc_sys_free, "System free memory target size in bytes"); | |
7098 | ||
c28b2279 | 7099 | #endif |