]>
Commit | Line | Data |
---|---|---|
34dc7c2f BB |
1 | /* |
2 | * CDDL HEADER START | |
3 | * | |
4 | * The contents of this file are subject to the terms of the | |
5 | * Common Development and Distribution License (the "License"). | |
6 | * You may not use this file except in compliance with the License. | |
7 | * | |
8 | * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE | |
9 | * or http://www.opensolaris.org/os/licensing. | |
10 | * See the License for the specific language governing permissions | |
11 | * and limitations under the License. | |
12 | * | |
13 | * When distributing Covered Code, include this CDDL HEADER in each | |
14 | * file and include the License file at usr/src/OPENSOLARIS.LICENSE. | |
15 | * If applicable, add the following below this CDDL HEADER, with the | |
16 | * fields enclosed by brackets "[]" replaced with your own identifying | |
17 | * information: Portions Copyright [yyyy] [name of copyright owner] | |
18 | * | |
19 | * CDDL HEADER END | |
20 | */ | |
21 | /* | |
d164b209 | 22 | * Copyright 2009 Sun Microsystems, Inc. All rights reserved. |
34dc7c2f BB |
23 | * Use is subject to license terms. |
24 | */ | |
25 | ||
34dc7c2f BB |
26 | /* |
27 | * DVA-based Adjustable Replacement Cache | |
28 | * | |
29 | * While much of the theory of operation used here is | |
30 | * based on the self-tuning, low overhead replacement cache | |
31 | * presented by Megiddo and Modha at FAST 2003, there are some | |
32 | * significant differences: | |
33 | * | |
34 | * 1. The Megiddo and Modha model assumes any page is evictable. | |
35 | * Pages in its cache cannot be "locked" into memory. This makes | |
36 | * the eviction algorithm simple: evict the last page in the list. | |
37 | * This also make the performance characteristics easy to reason | |
38 | * about. Our cache is not so simple. At any given moment, some | |
39 | * subset of the blocks in the cache are un-evictable because we | |
40 | * have handed out a reference to them. Blocks are only evictable | |
41 | * when there are no external references active. This makes | |
42 | * eviction far more problematic: we choose to evict the evictable | |
43 | * blocks that are the "lowest" in the list. | |
44 | * | |
45 | * There are times when it is not possible to evict the requested | |
46 | * space. In these circumstances we are unable to adjust the cache | |
47 | * size. To prevent the cache growing unbounded at these times we | |
48 | * implement a "cache throttle" that slows the flow of new data | |
49 | * into the cache until we can make space available. | |
50 | * | |
51 | * 2. The Megiddo and Modha model assumes a fixed cache size. | |
52 | * Pages are evicted when the cache is full and there is a cache | |
53 | * miss. Our model has a variable sized cache. It grows with | |
54 | * high use, but also tries to react to memory pressure from the | |
55 | * operating system: decreasing its size when system memory is | |
56 | * tight. | |
57 | * | |
58 | * 3. The Megiddo and Modha model assumes a fixed page size. All | |
59 | * elements of the cache are therefor exactly the same size. So | |
60 | * when adjusting the cache size following a cache miss, its simply | |
61 | * a matter of choosing a single page to evict. In our model, we | |
62 | * have variable sized cache blocks (rangeing from 512 bytes to | |
63 | * 128K bytes). We therefor choose a set of blocks to evict to make | |
64 | * space for a cache miss that approximates as closely as possible | |
65 | * the space used by the new block. | |
66 | * | |
67 | * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" | |
68 | * by N. Megiddo & D. Modha, FAST 2003 | |
69 | */ | |
70 | ||
71 | /* | |
72 | * The locking model: | |
73 | * | |
74 | * A new reference to a cache buffer can be obtained in two | |
75 | * ways: 1) via a hash table lookup using the DVA as a key, | |
76 | * or 2) via one of the ARC lists. The arc_read() interface | |
77 | * uses method 1, while the internal arc algorithms for | |
78 | * adjusting the cache use method 2. We therefor provide two | |
79 | * types of locks: 1) the hash table lock array, and 2) the | |
80 | * arc list locks. | |
81 | * | |
82 | * Buffers do not have their own mutexs, rather they rely on the | |
83 | * hash table mutexs for the bulk of their protection (i.e. most | |
84 | * fields in the arc_buf_hdr_t are protected by these mutexs). | |
85 | * | |
86 | * buf_hash_find() returns the appropriate mutex (held) when it | |
87 | * locates the requested buffer in the hash table. It returns | |
88 | * NULL for the mutex if the buffer was not in the table. | |
89 | * | |
90 | * buf_hash_remove() expects the appropriate hash mutex to be | |
91 | * already held before it is invoked. | |
92 | * | |
93 | * Each arc state also has a mutex which is used to protect the | |
94 | * buffer list associated with the state. When attempting to | |
95 | * obtain a hash table lock while holding an arc list lock you | |
96 | * must use: mutex_tryenter() to avoid deadlock. Also note that | |
97 | * the active state mutex must be held before the ghost state mutex. | |
98 | * | |
99 | * Arc buffers may have an associated eviction callback function. | |
100 | * This function will be invoked prior to removing the buffer (e.g. | |
101 | * in arc_do_user_evicts()). Note however that the data associated | |
102 | * with the buffer may be evicted prior to the callback. The callback | |
103 | * must be made with *no locks held* (to prevent deadlock). Additionally, | |
104 | * the users of callbacks must ensure that their private data is | |
105 | * protected from simultaneous callbacks from arc_buf_evict() | |
106 | * and arc_do_user_evicts(). | |
107 | * | |
108 | * Note that the majority of the performance stats are manipulated | |
109 | * with atomic operations. | |
110 | * | |
111 | * The L2ARC uses the l2arc_buflist_mtx global mutex for the following: | |
112 | * | |
113 | * - L2ARC buflist creation | |
114 | * - L2ARC buflist eviction | |
115 | * - L2ARC write completion, which walks L2ARC buflists | |
116 | * - ARC header destruction, as it removes from L2ARC buflists | |
117 | * - ARC header release, as it removes from L2ARC buflists | |
118 | */ | |
119 | ||
120 | #include <sys/spa.h> | |
121 | #include <sys/zio.h> | |
122 | #include <sys/zio_checksum.h> | |
123 | #include <sys/zfs_context.h> | |
124 | #include <sys/arc.h> | |
125 | #include <sys/refcount.h> | |
b128c09f | 126 | #include <sys/vdev.h> |
34dc7c2f BB |
127 | #ifdef _KERNEL |
128 | #include <sys/vmsystm.h> | |
129 | #include <vm/anon.h> | |
130 | #include <sys/fs/swapnode.h> | |
131 | #include <sys/dnlc.h> | |
132 | #endif | |
133 | #include <sys/callb.h> | |
134 | #include <sys/kstat.h> | |
135 | ||
136 | static kmutex_t arc_reclaim_thr_lock; | |
137 | static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ | |
138 | static uint8_t arc_thread_exit; | |
139 | ||
140 | extern int zfs_write_limit_shift; | |
141 | extern uint64_t zfs_write_limit_max; | |
b128c09f | 142 | extern kmutex_t zfs_write_limit_lock; |
34dc7c2f BB |
143 | |
144 | #define ARC_REDUCE_DNLC_PERCENT 3 | |
145 | uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; | |
146 | ||
147 | typedef enum arc_reclaim_strategy { | |
148 | ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ | |
149 | ARC_RECLAIM_CONS /* Conservative reclaim strategy */ | |
150 | } arc_reclaim_strategy_t; | |
151 | ||
152 | /* number of seconds before growing cache again */ | |
153 | static int arc_grow_retry = 60; | |
154 | ||
d164b209 BB |
155 | /* shift of arc_c for calculating both min and max arc_p */ |
156 | static int arc_p_min_shift = 4; | |
157 | ||
158 | /* log2(fraction of arc to reclaim) */ | |
159 | static int arc_shrink_shift = 5; | |
160 | ||
34dc7c2f BB |
161 | /* |
162 | * minimum lifespan of a prefetch block in clock ticks | |
163 | * (initialized in arc_init()) | |
164 | */ | |
165 | static int arc_min_prefetch_lifespan; | |
166 | ||
167 | static int arc_dead; | |
168 | ||
b128c09f BB |
169 | /* |
170 | * The arc has filled available memory and has now warmed up. | |
171 | */ | |
172 | static boolean_t arc_warm; | |
173 | ||
34dc7c2f BB |
174 | /* |
175 | * These tunables are for performance analysis. | |
176 | */ | |
177 | uint64_t zfs_arc_max; | |
178 | uint64_t zfs_arc_min; | |
179 | uint64_t zfs_arc_meta_limit = 0; | |
b128c09f | 180 | int zfs_mdcomp_disable = 0; |
d164b209 BB |
181 | int zfs_arc_grow_retry = 0; |
182 | int zfs_arc_shrink_shift = 0; | |
183 | int zfs_arc_p_min_shift = 0; | |
34dc7c2f BB |
184 | |
185 | /* | |
186 | * Note that buffers can be in one of 6 states: | |
187 | * ARC_anon - anonymous (discussed below) | |
188 | * ARC_mru - recently used, currently cached | |
189 | * ARC_mru_ghost - recentely used, no longer in cache | |
190 | * ARC_mfu - frequently used, currently cached | |
191 | * ARC_mfu_ghost - frequently used, no longer in cache | |
192 | * ARC_l2c_only - exists in L2ARC but not other states | |
193 | * When there are no active references to the buffer, they are | |
194 | * are linked onto a list in one of these arc states. These are | |
195 | * the only buffers that can be evicted or deleted. Within each | |
196 | * state there are multiple lists, one for meta-data and one for | |
197 | * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, | |
198 | * etc.) is tracked separately so that it can be managed more | |
199 | * explicitly: favored over data, limited explicitly. | |
200 | * | |
201 | * Anonymous buffers are buffers that are not associated with | |
202 | * a DVA. These are buffers that hold dirty block copies | |
203 | * before they are written to stable storage. By definition, | |
204 | * they are "ref'd" and are considered part of arc_mru | |
205 | * that cannot be freed. Generally, they will aquire a DVA | |
206 | * as they are written and migrate onto the arc_mru list. | |
207 | * | |
208 | * The ARC_l2c_only state is for buffers that are in the second | |
209 | * level ARC but no longer in any of the ARC_m* lists. The second | |
210 | * level ARC itself may also contain buffers that are in any of | |
211 | * the ARC_m* states - meaning that a buffer can exist in two | |
212 | * places. The reason for the ARC_l2c_only state is to keep the | |
213 | * buffer header in the hash table, so that reads that hit the | |
214 | * second level ARC benefit from these fast lookups. | |
215 | */ | |
216 | ||
217 | typedef struct arc_state { | |
218 | list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ | |
219 | uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ | |
220 | uint64_t arcs_size; /* total amount of data in this state */ | |
221 | kmutex_t arcs_mtx; | |
222 | } arc_state_t; | |
223 | ||
224 | /* The 6 states: */ | |
225 | static arc_state_t ARC_anon; | |
226 | static arc_state_t ARC_mru; | |
227 | static arc_state_t ARC_mru_ghost; | |
228 | static arc_state_t ARC_mfu; | |
229 | static arc_state_t ARC_mfu_ghost; | |
230 | static arc_state_t ARC_l2c_only; | |
231 | ||
232 | typedef struct arc_stats { | |
233 | kstat_named_t arcstat_hits; | |
234 | kstat_named_t arcstat_misses; | |
235 | kstat_named_t arcstat_demand_data_hits; | |
236 | kstat_named_t arcstat_demand_data_misses; | |
237 | kstat_named_t arcstat_demand_metadata_hits; | |
238 | kstat_named_t arcstat_demand_metadata_misses; | |
239 | kstat_named_t arcstat_prefetch_data_hits; | |
240 | kstat_named_t arcstat_prefetch_data_misses; | |
241 | kstat_named_t arcstat_prefetch_metadata_hits; | |
242 | kstat_named_t arcstat_prefetch_metadata_misses; | |
243 | kstat_named_t arcstat_mru_hits; | |
244 | kstat_named_t arcstat_mru_ghost_hits; | |
245 | kstat_named_t arcstat_mfu_hits; | |
246 | kstat_named_t arcstat_mfu_ghost_hits; | |
247 | kstat_named_t arcstat_deleted; | |
248 | kstat_named_t arcstat_recycle_miss; | |
249 | kstat_named_t arcstat_mutex_miss; | |
250 | kstat_named_t arcstat_evict_skip; | |
251 | kstat_named_t arcstat_hash_elements; | |
252 | kstat_named_t arcstat_hash_elements_max; | |
253 | kstat_named_t arcstat_hash_collisions; | |
254 | kstat_named_t arcstat_hash_chains; | |
255 | kstat_named_t arcstat_hash_chain_max; | |
256 | kstat_named_t arcstat_p; | |
257 | kstat_named_t arcstat_c; | |
258 | kstat_named_t arcstat_c_min; | |
259 | kstat_named_t arcstat_c_max; | |
260 | kstat_named_t arcstat_size; | |
261 | kstat_named_t arcstat_hdr_size; | |
d164b209 BB |
262 | kstat_named_t arcstat_data_size; |
263 | kstat_named_t arcstat_other_size; | |
34dc7c2f BB |
264 | kstat_named_t arcstat_l2_hits; |
265 | kstat_named_t arcstat_l2_misses; | |
266 | kstat_named_t arcstat_l2_feeds; | |
267 | kstat_named_t arcstat_l2_rw_clash; | |
d164b209 BB |
268 | kstat_named_t arcstat_l2_read_bytes; |
269 | kstat_named_t arcstat_l2_write_bytes; | |
34dc7c2f BB |
270 | kstat_named_t arcstat_l2_writes_sent; |
271 | kstat_named_t arcstat_l2_writes_done; | |
272 | kstat_named_t arcstat_l2_writes_error; | |
273 | kstat_named_t arcstat_l2_writes_hdr_miss; | |
274 | kstat_named_t arcstat_l2_evict_lock_retry; | |
275 | kstat_named_t arcstat_l2_evict_reading; | |
276 | kstat_named_t arcstat_l2_free_on_write; | |
277 | kstat_named_t arcstat_l2_abort_lowmem; | |
278 | kstat_named_t arcstat_l2_cksum_bad; | |
279 | kstat_named_t arcstat_l2_io_error; | |
280 | kstat_named_t arcstat_l2_size; | |
281 | kstat_named_t arcstat_l2_hdr_size; | |
282 | kstat_named_t arcstat_memory_throttle_count; | |
283 | } arc_stats_t; | |
284 | ||
285 | static arc_stats_t arc_stats = { | |
286 | { "hits", KSTAT_DATA_UINT64 }, | |
287 | { "misses", KSTAT_DATA_UINT64 }, | |
288 | { "demand_data_hits", KSTAT_DATA_UINT64 }, | |
289 | { "demand_data_misses", KSTAT_DATA_UINT64 }, | |
290 | { "demand_metadata_hits", KSTAT_DATA_UINT64 }, | |
291 | { "demand_metadata_misses", KSTAT_DATA_UINT64 }, | |
292 | { "prefetch_data_hits", KSTAT_DATA_UINT64 }, | |
293 | { "prefetch_data_misses", KSTAT_DATA_UINT64 }, | |
294 | { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, | |
295 | { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, | |
296 | { "mru_hits", KSTAT_DATA_UINT64 }, | |
297 | { "mru_ghost_hits", KSTAT_DATA_UINT64 }, | |
298 | { "mfu_hits", KSTAT_DATA_UINT64 }, | |
299 | { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, | |
300 | { "deleted", KSTAT_DATA_UINT64 }, | |
301 | { "recycle_miss", KSTAT_DATA_UINT64 }, | |
302 | { "mutex_miss", KSTAT_DATA_UINT64 }, | |
303 | { "evict_skip", KSTAT_DATA_UINT64 }, | |
304 | { "hash_elements", KSTAT_DATA_UINT64 }, | |
305 | { "hash_elements_max", KSTAT_DATA_UINT64 }, | |
306 | { "hash_collisions", KSTAT_DATA_UINT64 }, | |
307 | { "hash_chains", KSTAT_DATA_UINT64 }, | |
308 | { "hash_chain_max", KSTAT_DATA_UINT64 }, | |
309 | { "p", KSTAT_DATA_UINT64 }, | |
310 | { "c", KSTAT_DATA_UINT64 }, | |
311 | { "c_min", KSTAT_DATA_UINT64 }, | |
312 | { "c_max", KSTAT_DATA_UINT64 }, | |
313 | { "size", KSTAT_DATA_UINT64 }, | |
314 | { "hdr_size", KSTAT_DATA_UINT64 }, | |
d164b209 BB |
315 | { "data_size", KSTAT_DATA_UINT64 }, |
316 | { "other_size", KSTAT_DATA_UINT64 }, | |
34dc7c2f BB |
317 | { "l2_hits", KSTAT_DATA_UINT64 }, |
318 | { "l2_misses", KSTAT_DATA_UINT64 }, | |
319 | { "l2_feeds", KSTAT_DATA_UINT64 }, | |
320 | { "l2_rw_clash", KSTAT_DATA_UINT64 }, | |
d164b209 BB |
321 | { "l2_read_bytes", KSTAT_DATA_UINT64 }, |
322 | { "l2_write_bytes", KSTAT_DATA_UINT64 }, | |
34dc7c2f BB |
323 | { "l2_writes_sent", KSTAT_DATA_UINT64 }, |
324 | { "l2_writes_done", KSTAT_DATA_UINT64 }, | |
325 | { "l2_writes_error", KSTAT_DATA_UINT64 }, | |
326 | { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, | |
327 | { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, | |
328 | { "l2_evict_reading", KSTAT_DATA_UINT64 }, | |
329 | { "l2_free_on_write", KSTAT_DATA_UINT64 }, | |
330 | { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, | |
331 | { "l2_cksum_bad", KSTAT_DATA_UINT64 }, | |
332 | { "l2_io_error", KSTAT_DATA_UINT64 }, | |
333 | { "l2_size", KSTAT_DATA_UINT64 }, | |
334 | { "l2_hdr_size", KSTAT_DATA_UINT64 }, | |
335 | { "memory_throttle_count", KSTAT_DATA_UINT64 } | |
336 | }; | |
337 | ||
338 | #define ARCSTAT(stat) (arc_stats.stat.value.ui64) | |
339 | ||
340 | #define ARCSTAT_INCR(stat, val) \ | |
341 | atomic_add_64(&arc_stats.stat.value.ui64, (val)); | |
342 | ||
343 | #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) | |
344 | #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) | |
345 | ||
346 | #define ARCSTAT_MAX(stat, val) { \ | |
347 | uint64_t m; \ | |
348 | while ((val) > (m = arc_stats.stat.value.ui64) && \ | |
349 | (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ | |
350 | continue; \ | |
351 | } | |
352 | ||
353 | #define ARCSTAT_MAXSTAT(stat) \ | |
354 | ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) | |
355 | ||
356 | /* | |
357 | * We define a macro to allow ARC hits/misses to be easily broken down by | |
358 | * two separate conditions, giving a total of four different subtypes for | |
359 | * each of hits and misses (so eight statistics total). | |
360 | */ | |
361 | #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ | |
362 | if (cond1) { \ | |
363 | if (cond2) { \ | |
364 | ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ | |
365 | } else { \ | |
366 | ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ | |
367 | } \ | |
368 | } else { \ | |
369 | if (cond2) { \ | |
370 | ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ | |
371 | } else { \ | |
372 | ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ | |
373 | } \ | |
374 | } | |
375 | ||
376 | kstat_t *arc_ksp; | |
377 | static arc_state_t *arc_anon; | |
378 | static arc_state_t *arc_mru; | |
379 | static arc_state_t *arc_mru_ghost; | |
380 | static arc_state_t *arc_mfu; | |
381 | static arc_state_t *arc_mfu_ghost; | |
382 | static arc_state_t *arc_l2c_only; | |
383 | ||
384 | /* | |
385 | * There are several ARC variables that are critical to export as kstats -- | |
386 | * but we don't want to have to grovel around in the kstat whenever we wish to | |
387 | * manipulate them. For these variables, we therefore define them to be in | |
388 | * terms of the statistic variable. This assures that we are not introducing | |
389 | * the possibility of inconsistency by having shadow copies of the variables, | |
390 | * while still allowing the code to be readable. | |
391 | */ | |
392 | #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ | |
393 | #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ | |
394 | #define arc_c ARCSTAT(arcstat_c) /* target size of cache */ | |
395 | #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ | |
396 | #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ | |
397 | ||
398 | static int arc_no_grow; /* Don't try to grow cache size */ | |
399 | static uint64_t arc_tempreserve; | |
400 | static uint64_t arc_meta_used; | |
401 | static uint64_t arc_meta_limit; | |
402 | static uint64_t arc_meta_max = 0; | |
403 | ||
404 | typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; | |
405 | ||
406 | typedef struct arc_callback arc_callback_t; | |
407 | ||
408 | struct arc_callback { | |
409 | void *acb_private; | |
410 | arc_done_func_t *acb_done; | |
34dc7c2f BB |
411 | arc_buf_t *acb_buf; |
412 | zio_t *acb_zio_dummy; | |
413 | arc_callback_t *acb_next; | |
414 | }; | |
415 | ||
416 | typedef struct arc_write_callback arc_write_callback_t; | |
417 | ||
418 | struct arc_write_callback { | |
419 | void *awcb_private; | |
420 | arc_done_func_t *awcb_ready; | |
421 | arc_done_func_t *awcb_done; | |
422 | arc_buf_t *awcb_buf; | |
423 | }; | |
424 | ||
425 | struct arc_buf_hdr { | |
426 | /* protected by hash lock */ | |
427 | dva_t b_dva; | |
428 | uint64_t b_birth; | |
429 | uint64_t b_cksum0; | |
430 | ||
431 | kmutex_t b_freeze_lock; | |
432 | zio_cksum_t *b_freeze_cksum; | |
433 | ||
434 | arc_buf_hdr_t *b_hash_next; | |
435 | arc_buf_t *b_buf; | |
436 | uint32_t b_flags; | |
437 | uint32_t b_datacnt; | |
438 | ||
439 | arc_callback_t *b_acb; | |
440 | kcondvar_t b_cv; | |
441 | ||
442 | /* immutable */ | |
443 | arc_buf_contents_t b_type; | |
444 | uint64_t b_size; | |
d164b209 | 445 | uint64_t b_spa; |
34dc7c2f BB |
446 | |
447 | /* protected by arc state mutex */ | |
448 | arc_state_t *b_state; | |
449 | list_node_t b_arc_node; | |
450 | ||
451 | /* updated atomically */ | |
452 | clock_t b_arc_access; | |
453 | ||
454 | /* self protecting */ | |
455 | refcount_t b_refcnt; | |
456 | ||
457 | l2arc_buf_hdr_t *b_l2hdr; | |
458 | list_node_t b_l2node; | |
459 | }; | |
460 | ||
461 | static arc_buf_t *arc_eviction_list; | |
462 | static kmutex_t arc_eviction_mtx; | |
463 | static arc_buf_hdr_t arc_eviction_hdr; | |
464 | static void arc_get_data_buf(arc_buf_t *buf); | |
465 | static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); | |
466 | static int arc_evict_needed(arc_buf_contents_t type); | |
d164b209 | 467 | static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes); |
34dc7c2f BB |
468 | |
469 | #define GHOST_STATE(state) \ | |
470 | ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ | |
471 | (state) == arc_l2c_only) | |
472 | ||
473 | /* | |
474 | * Private ARC flags. These flags are private ARC only flags that will show up | |
475 | * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can | |
476 | * be passed in as arc_flags in things like arc_read. However, these flags | |
477 | * should never be passed and should only be set by ARC code. When adding new | |
478 | * public flags, make sure not to smash the private ones. | |
479 | */ | |
480 | ||
481 | #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ | |
482 | #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ | |
483 | #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ | |
484 | #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ | |
485 | #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ | |
486 | #define ARC_INDIRECT (1 << 14) /* this is an indirect block */ | |
487 | #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ | |
b128c09f BB |
488 | #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ |
489 | #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ | |
490 | #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ | |
491 | #define ARC_STORED (1 << 19) /* has been store()d to */ | |
34dc7c2f BB |
492 | |
493 | #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) | |
494 | #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) | |
495 | #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) | |
d164b209 | 496 | #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH) |
34dc7c2f BB |
497 | #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) |
498 | #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) | |
499 | #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) | |
b128c09f BB |
500 | #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) |
501 | #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ | |
502 | (hdr)->b_l2hdr != NULL) | |
34dc7c2f BB |
503 | #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) |
504 | #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) | |
505 | #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) | |
506 | ||
507 | /* | |
508 | * Other sizes | |
509 | */ | |
510 | ||
511 | #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) | |
512 | #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) | |
513 | ||
514 | /* | |
515 | * Hash table routines | |
516 | */ | |
517 | ||
518 | #define HT_LOCK_PAD 64 | |
519 | ||
520 | struct ht_lock { | |
521 | kmutex_t ht_lock; | |
522 | #ifdef _KERNEL | |
523 | unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; | |
524 | #endif | |
525 | }; | |
526 | ||
527 | #define BUF_LOCKS 256 | |
528 | typedef struct buf_hash_table { | |
529 | uint64_t ht_mask; | |
530 | arc_buf_hdr_t **ht_table; | |
531 | struct ht_lock ht_locks[BUF_LOCKS]; | |
532 | } buf_hash_table_t; | |
533 | ||
534 | static buf_hash_table_t buf_hash_table; | |
535 | ||
536 | #define BUF_HASH_INDEX(spa, dva, birth) \ | |
537 | (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) | |
538 | #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) | |
539 | #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) | |
540 | #define HDR_LOCK(buf) \ | |
541 | (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) | |
542 | ||
543 | uint64_t zfs_crc64_table[256]; | |
544 | ||
545 | /* | |
546 | * Level 2 ARC | |
547 | */ | |
548 | ||
549 | #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ | |
d164b209 BB |
550 | #define L2ARC_HEADROOM 2 /* num of writes */ |
551 | #define L2ARC_FEED_SECS 1 /* caching interval secs */ | |
552 | #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ | |
34dc7c2f BB |
553 | |
554 | #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) | |
555 | #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) | |
556 | ||
557 | /* | |
558 | * L2ARC Performance Tunables | |
559 | */ | |
560 | uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ | |
b128c09f | 561 | uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ |
34dc7c2f BB |
562 | uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ |
563 | uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ | |
d164b209 | 564 | uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ |
34dc7c2f | 565 | boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ |
d164b209 BB |
566 | boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ |
567 | boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ | |
34dc7c2f BB |
568 | |
569 | /* | |
570 | * L2ARC Internals | |
571 | */ | |
572 | typedef struct l2arc_dev { | |
573 | vdev_t *l2ad_vdev; /* vdev */ | |
574 | spa_t *l2ad_spa; /* spa */ | |
575 | uint64_t l2ad_hand; /* next write location */ | |
576 | uint64_t l2ad_write; /* desired write size, bytes */ | |
b128c09f | 577 | uint64_t l2ad_boost; /* warmup write boost, bytes */ |
34dc7c2f BB |
578 | uint64_t l2ad_start; /* first addr on device */ |
579 | uint64_t l2ad_end; /* last addr on device */ | |
580 | uint64_t l2ad_evict; /* last addr eviction reached */ | |
581 | boolean_t l2ad_first; /* first sweep through */ | |
d164b209 | 582 | boolean_t l2ad_writing; /* currently writing */ |
34dc7c2f BB |
583 | list_t *l2ad_buflist; /* buffer list */ |
584 | list_node_t l2ad_node; /* device list node */ | |
585 | } l2arc_dev_t; | |
586 | ||
587 | static list_t L2ARC_dev_list; /* device list */ | |
588 | static list_t *l2arc_dev_list; /* device list pointer */ | |
589 | static kmutex_t l2arc_dev_mtx; /* device list mutex */ | |
590 | static l2arc_dev_t *l2arc_dev_last; /* last device used */ | |
591 | static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ | |
592 | static list_t L2ARC_free_on_write; /* free after write buf list */ | |
593 | static list_t *l2arc_free_on_write; /* free after write list ptr */ | |
594 | static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ | |
595 | static uint64_t l2arc_ndev; /* number of devices */ | |
596 | ||
597 | typedef struct l2arc_read_callback { | |
598 | arc_buf_t *l2rcb_buf; /* read buffer */ | |
599 | spa_t *l2rcb_spa; /* spa */ | |
600 | blkptr_t l2rcb_bp; /* original blkptr */ | |
601 | zbookmark_t l2rcb_zb; /* original bookmark */ | |
602 | int l2rcb_flags; /* original flags */ | |
603 | } l2arc_read_callback_t; | |
604 | ||
605 | typedef struct l2arc_write_callback { | |
606 | l2arc_dev_t *l2wcb_dev; /* device info */ | |
607 | arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ | |
608 | } l2arc_write_callback_t; | |
609 | ||
610 | struct l2arc_buf_hdr { | |
611 | /* protected by arc_buf_hdr mutex */ | |
612 | l2arc_dev_t *b_dev; /* L2ARC device */ | |
613 | daddr_t b_daddr; /* disk address, offset byte */ | |
614 | }; | |
615 | ||
616 | typedef struct l2arc_data_free { | |
617 | /* protected by l2arc_free_on_write_mtx */ | |
618 | void *l2df_data; | |
619 | size_t l2df_size; | |
620 | void (*l2df_func)(void *, size_t); | |
621 | list_node_t l2df_list_node; | |
622 | } l2arc_data_free_t; | |
623 | ||
624 | static kmutex_t l2arc_feed_thr_lock; | |
625 | static kcondvar_t l2arc_feed_thr_cv; | |
626 | static uint8_t l2arc_thread_exit; | |
627 | ||
628 | static void l2arc_read_done(zio_t *zio); | |
629 | static void l2arc_hdr_stat_add(void); | |
630 | static void l2arc_hdr_stat_remove(void); | |
631 | ||
632 | static uint64_t | |
d164b209 | 633 | buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) |
34dc7c2f | 634 | { |
34dc7c2f BB |
635 | uint8_t *vdva = (uint8_t *)dva; |
636 | uint64_t crc = -1ULL; | |
637 | int i; | |
638 | ||
639 | ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); | |
640 | ||
641 | for (i = 0; i < sizeof (dva_t); i++) | |
642 | crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; | |
643 | ||
d164b209 | 644 | crc ^= (spa>>8) ^ birth; |
34dc7c2f BB |
645 | |
646 | return (crc); | |
647 | } | |
648 | ||
649 | #define BUF_EMPTY(buf) \ | |
650 | ((buf)->b_dva.dva_word[0] == 0 && \ | |
651 | (buf)->b_dva.dva_word[1] == 0 && \ | |
652 | (buf)->b_birth == 0) | |
653 | ||
654 | #define BUF_EQUAL(spa, dva, birth, buf) \ | |
655 | ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ | |
656 | ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ | |
657 | ((buf)->b_birth == birth) && ((buf)->b_spa == spa) | |
658 | ||
659 | static arc_buf_hdr_t * | |
d164b209 | 660 | buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) |
34dc7c2f BB |
661 | { |
662 | uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); | |
663 | kmutex_t *hash_lock = BUF_HASH_LOCK(idx); | |
664 | arc_buf_hdr_t *buf; | |
665 | ||
666 | mutex_enter(hash_lock); | |
667 | for (buf = buf_hash_table.ht_table[idx]; buf != NULL; | |
668 | buf = buf->b_hash_next) { | |
669 | if (BUF_EQUAL(spa, dva, birth, buf)) { | |
670 | *lockp = hash_lock; | |
671 | return (buf); | |
672 | } | |
673 | } | |
674 | mutex_exit(hash_lock); | |
675 | *lockp = NULL; | |
676 | return (NULL); | |
677 | } | |
678 | ||
679 | /* | |
680 | * Insert an entry into the hash table. If there is already an element | |
681 | * equal to elem in the hash table, then the already existing element | |
682 | * will be returned and the new element will not be inserted. | |
683 | * Otherwise returns NULL. | |
684 | */ | |
685 | static arc_buf_hdr_t * | |
686 | buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) | |
687 | { | |
688 | uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); | |
689 | kmutex_t *hash_lock = BUF_HASH_LOCK(idx); | |
690 | arc_buf_hdr_t *fbuf; | |
691 | uint32_t i; | |
692 | ||
693 | ASSERT(!HDR_IN_HASH_TABLE(buf)); | |
694 | *lockp = hash_lock; | |
695 | mutex_enter(hash_lock); | |
696 | for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; | |
697 | fbuf = fbuf->b_hash_next, i++) { | |
698 | if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) | |
699 | return (fbuf); | |
700 | } | |
701 | ||
702 | buf->b_hash_next = buf_hash_table.ht_table[idx]; | |
703 | buf_hash_table.ht_table[idx] = buf; | |
704 | buf->b_flags |= ARC_IN_HASH_TABLE; | |
705 | ||
706 | /* collect some hash table performance data */ | |
707 | if (i > 0) { | |
708 | ARCSTAT_BUMP(arcstat_hash_collisions); | |
709 | if (i == 1) | |
710 | ARCSTAT_BUMP(arcstat_hash_chains); | |
711 | ||
712 | ARCSTAT_MAX(arcstat_hash_chain_max, i); | |
713 | } | |
714 | ||
715 | ARCSTAT_BUMP(arcstat_hash_elements); | |
716 | ARCSTAT_MAXSTAT(arcstat_hash_elements); | |
717 | ||
718 | return (NULL); | |
719 | } | |
720 | ||
721 | static void | |
722 | buf_hash_remove(arc_buf_hdr_t *buf) | |
723 | { | |
724 | arc_buf_hdr_t *fbuf, **bufp; | |
725 | uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); | |
726 | ||
727 | ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); | |
728 | ASSERT(HDR_IN_HASH_TABLE(buf)); | |
729 | ||
730 | bufp = &buf_hash_table.ht_table[idx]; | |
731 | while ((fbuf = *bufp) != buf) { | |
732 | ASSERT(fbuf != NULL); | |
733 | bufp = &fbuf->b_hash_next; | |
734 | } | |
735 | *bufp = buf->b_hash_next; | |
736 | buf->b_hash_next = NULL; | |
737 | buf->b_flags &= ~ARC_IN_HASH_TABLE; | |
738 | ||
739 | /* collect some hash table performance data */ | |
740 | ARCSTAT_BUMPDOWN(arcstat_hash_elements); | |
741 | ||
742 | if (buf_hash_table.ht_table[idx] && | |
743 | buf_hash_table.ht_table[idx]->b_hash_next == NULL) | |
744 | ARCSTAT_BUMPDOWN(arcstat_hash_chains); | |
745 | } | |
746 | ||
747 | /* | |
748 | * Global data structures and functions for the buf kmem cache. | |
749 | */ | |
750 | static kmem_cache_t *hdr_cache; | |
751 | static kmem_cache_t *buf_cache; | |
752 | ||
753 | static void | |
754 | buf_fini(void) | |
755 | { | |
756 | int i; | |
757 | ||
758 | kmem_free(buf_hash_table.ht_table, | |
759 | (buf_hash_table.ht_mask + 1) * sizeof (void *)); | |
760 | for (i = 0; i < BUF_LOCKS; i++) | |
761 | mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); | |
762 | kmem_cache_destroy(hdr_cache); | |
763 | kmem_cache_destroy(buf_cache); | |
764 | } | |
765 | ||
766 | /* | |
767 | * Constructor callback - called when the cache is empty | |
768 | * and a new buf is requested. | |
769 | */ | |
770 | /* ARGSUSED */ | |
771 | static int | |
772 | hdr_cons(void *vbuf, void *unused, int kmflag) | |
773 | { | |
774 | arc_buf_hdr_t *buf = vbuf; | |
775 | ||
776 | bzero(buf, sizeof (arc_buf_hdr_t)); | |
777 | refcount_create(&buf->b_refcnt); | |
778 | cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); | |
779 | mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); | |
d164b209 | 780 | arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); |
34dc7c2f | 781 | |
34dc7c2f BB |
782 | return (0); |
783 | } | |
784 | ||
b128c09f BB |
785 | /* ARGSUSED */ |
786 | static int | |
787 | buf_cons(void *vbuf, void *unused, int kmflag) | |
788 | { | |
789 | arc_buf_t *buf = vbuf; | |
790 | ||
791 | bzero(buf, sizeof (arc_buf_t)); | |
792 | rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL); | |
d164b209 BB |
793 | arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); |
794 | ||
b128c09f BB |
795 | return (0); |
796 | } | |
797 | ||
34dc7c2f BB |
798 | /* |
799 | * Destructor callback - called when a cached buf is | |
800 | * no longer required. | |
801 | */ | |
802 | /* ARGSUSED */ | |
803 | static void | |
804 | hdr_dest(void *vbuf, void *unused) | |
805 | { | |
806 | arc_buf_hdr_t *buf = vbuf; | |
807 | ||
808 | refcount_destroy(&buf->b_refcnt); | |
809 | cv_destroy(&buf->b_cv); | |
810 | mutex_destroy(&buf->b_freeze_lock); | |
d164b209 | 811 | arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); |
34dc7c2f BB |
812 | } |
813 | ||
b128c09f BB |
814 | /* ARGSUSED */ |
815 | static void | |
816 | buf_dest(void *vbuf, void *unused) | |
817 | { | |
818 | arc_buf_t *buf = vbuf; | |
819 | ||
820 | rw_destroy(&buf->b_lock); | |
d164b209 | 821 | arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); |
b128c09f BB |
822 | } |
823 | ||
34dc7c2f BB |
824 | /* |
825 | * Reclaim callback -- invoked when memory is low. | |
826 | */ | |
827 | /* ARGSUSED */ | |
828 | static void | |
829 | hdr_recl(void *unused) | |
830 | { | |
831 | dprintf("hdr_recl called\n"); | |
832 | /* | |
833 | * umem calls the reclaim func when we destroy the buf cache, | |
834 | * which is after we do arc_fini(). | |
835 | */ | |
836 | if (!arc_dead) | |
837 | cv_signal(&arc_reclaim_thr_cv); | |
838 | } | |
839 | ||
840 | static void | |
841 | buf_init(void) | |
842 | { | |
843 | uint64_t *ct; | |
844 | uint64_t hsize = 1ULL << 12; | |
845 | int i, j; | |
846 | ||
847 | /* | |
848 | * The hash table is big enough to fill all of physical memory | |
849 | * with an average 64K block size. The table will take up | |
850 | * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). | |
851 | */ | |
852 | while (hsize * 65536 < physmem * PAGESIZE) | |
853 | hsize <<= 1; | |
854 | retry: | |
855 | buf_hash_table.ht_mask = hsize - 1; | |
856 | buf_hash_table.ht_table = | |
857 | kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); | |
858 | if (buf_hash_table.ht_table == NULL) { | |
859 | ASSERT(hsize > (1ULL << 8)); | |
860 | hsize >>= 1; | |
861 | goto retry; | |
862 | } | |
863 | ||
864 | hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), | |
865 | 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); | |
866 | buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), | |
b128c09f | 867 | 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); |
34dc7c2f BB |
868 | |
869 | for (i = 0; i < 256; i++) | |
870 | for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) | |
871 | *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); | |
872 | ||
873 | for (i = 0; i < BUF_LOCKS; i++) { | |
874 | mutex_init(&buf_hash_table.ht_locks[i].ht_lock, | |
875 | NULL, MUTEX_DEFAULT, NULL); | |
876 | } | |
877 | } | |
878 | ||
879 | #define ARC_MINTIME (hz>>4) /* 62 ms */ | |
880 | ||
881 | static void | |
882 | arc_cksum_verify(arc_buf_t *buf) | |
883 | { | |
884 | zio_cksum_t zc; | |
885 | ||
886 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) | |
887 | return; | |
888 | ||
889 | mutex_enter(&buf->b_hdr->b_freeze_lock); | |
890 | if (buf->b_hdr->b_freeze_cksum == NULL || | |
891 | (buf->b_hdr->b_flags & ARC_IO_ERROR)) { | |
892 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
893 | return; | |
894 | } | |
895 | fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); | |
896 | if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) | |
897 | panic("buffer modified while frozen!"); | |
898 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
899 | } | |
900 | ||
901 | static int | |
902 | arc_cksum_equal(arc_buf_t *buf) | |
903 | { | |
904 | zio_cksum_t zc; | |
905 | int equal; | |
906 | ||
907 | mutex_enter(&buf->b_hdr->b_freeze_lock); | |
908 | fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); | |
909 | equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); | |
910 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
911 | ||
912 | return (equal); | |
913 | } | |
914 | ||
915 | static void | |
916 | arc_cksum_compute(arc_buf_t *buf, boolean_t force) | |
917 | { | |
918 | if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) | |
919 | return; | |
920 | ||
921 | mutex_enter(&buf->b_hdr->b_freeze_lock); | |
922 | if (buf->b_hdr->b_freeze_cksum != NULL) { | |
923 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
924 | return; | |
925 | } | |
926 | buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); | |
927 | fletcher_2_native(buf->b_data, buf->b_hdr->b_size, | |
928 | buf->b_hdr->b_freeze_cksum); | |
929 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
930 | } | |
931 | ||
932 | void | |
933 | arc_buf_thaw(arc_buf_t *buf) | |
934 | { | |
935 | if (zfs_flags & ZFS_DEBUG_MODIFY) { | |
936 | if (buf->b_hdr->b_state != arc_anon) | |
937 | panic("modifying non-anon buffer!"); | |
938 | if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) | |
939 | panic("modifying buffer while i/o in progress!"); | |
940 | arc_cksum_verify(buf); | |
941 | } | |
942 | ||
943 | mutex_enter(&buf->b_hdr->b_freeze_lock); | |
944 | if (buf->b_hdr->b_freeze_cksum != NULL) { | |
945 | kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
946 | buf->b_hdr->b_freeze_cksum = NULL; | |
947 | } | |
948 | mutex_exit(&buf->b_hdr->b_freeze_lock); | |
949 | } | |
950 | ||
951 | void | |
952 | arc_buf_freeze(arc_buf_t *buf) | |
953 | { | |
954 | if (!(zfs_flags & ZFS_DEBUG_MODIFY)) | |
955 | return; | |
956 | ||
957 | ASSERT(buf->b_hdr->b_freeze_cksum != NULL || | |
958 | buf->b_hdr->b_state == arc_anon); | |
959 | arc_cksum_compute(buf, B_FALSE); | |
960 | } | |
961 | ||
962 | static void | |
963 | add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) | |
964 | { | |
965 | ASSERT(MUTEX_HELD(hash_lock)); | |
966 | ||
967 | if ((refcount_add(&ab->b_refcnt, tag) == 1) && | |
968 | (ab->b_state != arc_anon)) { | |
969 | uint64_t delta = ab->b_size * ab->b_datacnt; | |
970 | list_t *list = &ab->b_state->arcs_list[ab->b_type]; | |
971 | uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; | |
972 | ||
973 | ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); | |
974 | mutex_enter(&ab->b_state->arcs_mtx); | |
975 | ASSERT(list_link_active(&ab->b_arc_node)); | |
976 | list_remove(list, ab); | |
977 | if (GHOST_STATE(ab->b_state)) { | |
978 | ASSERT3U(ab->b_datacnt, ==, 0); | |
979 | ASSERT3P(ab->b_buf, ==, NULL); | |
980 | delta = ab->b_size; | |
981 | } | |
982 | ASSERT(delta > 0); | |
983 | ASSERT3U(*size, >=, delta); | |
984 | atomic_add_64(size, -delta); | |
985 | mutex_exit(&ab->b_state->arcs_mtx); | |
b128c09f | 986 | /* remove the prefetch flag if we get a reference */ |
34dc7c2f BB |
987 | if (ab->b_flags & ARC_PREFETCH) |
988 | ab->b_flags &= ~ARC_PREFETCH; | |
989 | } | |
990 | } | |
991 | ||
992 | static int | |
993 | remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) | |
994 | { | |
995 | int cnt; | |
996 | arc_state_t *state = ab->b_state; | |
997 | ||
998 | ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); | |
999 | ASSERT(!GHOST_STATE(state)); | |
1000 | ||
1001 | if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && | |
1002 | (state != arc_anon)) { | |
1003 | uint64_t *size = &state->arcs_lsize[ab->b_type]; | |
1004 | ||
1005 | ASSERT(!MUTEX_HELD(&state->arcs_mtx)); | |
1006 | mutex_enter(&state->arcs_mtx); | |
1007 | ASSERT(!list_link_active(&ab->b_arc_node)); | |
1008 | list_insert_head(&state->arcs_list[ab->b_type], ab); | |
1009 | ASSERT(ab->b_datacnt > 0); | |
1010 | atomic_add_64(size, ab->b_size * ab->b_datacnt); | |
1011 | mutex_exit(&state->arcs_mtx); | |
1012 | } | |
1013 | return (cnt); | |
1014 | } | |
1015 | ||
1016 | /* | |
1017 | * Move the supplied buffer to the indicated state. The mutex | |
1018 | * for the buffer must be held by the caller. | |
1019 | */ | |
1020 | static void | |
1021 | arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) | |
1022 | { | |
1023 | arc_state_t *old_state = ab->b_state; | |
1024 | int64_t refcnt = refcount_count(&ab->b_refcnt); | |
1025 | uint64_t from_delta, to_delta; | |
1026 | ||
1027 | ASSERT(MUTEX_HELD(hash_lock)); | |
1028 | ASSERT(new_state != old_state); | |
1029 | ASSERT(refcnt == 0 || ab->b_datacnt > 0); | |
1030 | ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); | |
1031 | ||
1032 | from_delta = to_delta = ab->b_datacnt * ab->b_size; | |
1033 | ||
1034 | /* | |
1035 | * If this buffer is evictable, transfer it from the | |
1036 | * old state list to the new state list. | |
1037 | */ | |
1038 | if (refcnt == 0) { | |
1039 | if (old_state != arc_anon) { | |
1040 | int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); | |
1041 | uint64_t *size = &old_state->arcs_lsize[ab->b_type]; | |
1042 | ||
1043 | if (use_mutex) | |
1044 | mutex_enter(&old_state->arcs_mtx); | |
1045 | ||
1046 | ASSERT(list_link_active(&ab->b_arc_node)); | |
1047 | list_remove(&old_state->arcs_list[ab->b_type], ab); | |
1048 | ||
1049 | /* | |
1050 | * If prefetching out of the ghost cache, | |
1051 | * we will have a non-null datacnt. | |
1052 | */ | |
1053 | if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { | |
1054 | /* ghost elements have a ghost size */ | |
1055 | ASSERT(ab->b_buf == NULL); | |
1056 | from_delta = ab->b_size; | |
1057 | } | |
1058 | ASSERT3U(*size, >=, from_delta); | |
1059 | atomic_add_64(size, -from_delta); | |
1060 | ||
1061 | if (use_mutex) | |
1062 | mutex_exit(&old_state->arcs_mtx); | |
1063 | } | |
1064 | if (new_state != arc_anon) { | |
1065 | int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); | |
1066 | uint64_t *size = &new_state->arcs_lsize[ab->b_type]; | |
1067 | ||
1068 | if (use_mutex) | |
1069 | mutex_enter(&new_state->arcs_mtx); | |
1070 | ||
1071 | list_insert_head(&new_state->arcs_list[ab->b_type], ab); | |
1072 | ||
1073 | /* ghost elements have a ghost size */ | |
1074 | if (GHOST_STATE(new_state)) { | |
1075 | ASSERT(ab->b_datacnt == 0); | |
1076 | ASSERT(ab->b_buf == NULL); | |
1077 | to_delta = ab->b_size; | |
1078 | } | |
1079 | atomic_add_64(size, to_delta); | |
1080 | ||
1081 | if (use_mutex) | |
1082 | mutex_exit(&new_state->arcs_mtx); | |
1083 | } | |
1084 | } | |
1085 | ||
1086 | ASSERT(!BUF_EMPTY(ab)); | |
1087 | if (new_state == arc_anon) { | |
1088 | buf_hash_remove(ab); | |
1089 | } | |
1090 | ||
1091 | /* adjust state sizes */ | |
1092 | if (to_delta) | |
1093 | atomic_add_64(&new_state->arcs_size, to_delta); | |
1094 | if (from_delta) { | |
1095 | ASSERT3U(old_state->arcs_size, >=, from_delta); | |
1096 | atomic_add_64(&old_state->arcs_size, -from_delta); | |
1097 | } | |
1098 | ab->b_state = new_state; | |
1099 | ||
1100 | /* adjust l2arc hdr stats */ | |
1101 | if (new_state == arc_l2c_only) | |
1102 | l2arc_hdr_stat_add(); | |
1103 | else if (old_state == arc_l2c_only) | |
1104 | l2arc_hdr_stat_remove(); | |
1105 | } | |
1106 | ||
1107 | void | |
d164b209 | 1108 | arc_space_consume(uint64_t space, arc_space_type_t type) |
34dc7c2f | 1109 | { |
d164b209 BB |
1110 | ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); |
1111 | ||
1112 | switch (type) { | |
1113 | case ARC_SPACE_DATA: | |
1114 | ARCSTAT_INCR(arcstat_data_size, space); | |
1115 | break; | |
1116 | case ARC_SPACE_OTHER: | |
1117 | ARCSTAT_INCR(arcstat_other_size, space); | |
1118 | break; | |
1119 | case ARC_SPACE_HDRS: | |
1120 | ARCSTAT_INCR(arcstat_hdr_size, space); | |
1121 | break; | |
1122 | case ARC_SPACE_L2HDRS: | |
1123 | ARCSTAT_INCR(arcstat_l2_hdr_size, space); | |
1124 | break; | |
1125 | } | |
1126 | ||
34dc7c2f BB |
1127 | atomic_add_64(&arc_meta_used, space); |
1128 | atomic_add_64(&arc_size, space); | |
1129 | } | |
1130 | ||
1131 | void | |
d164b209 | 1132 | arc_space_return(uint64_t space, arc_space_type_t type) |
34dc7c2f | 1133 | { |
d164b209 BB |
1134 | ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); |
1135 | ||
1136 | switch (type) { | |
1137 | case ARC_SPACE_DATA: | |
1138 | ARCSTAT_INCR(arcstat_data_size, -space); | |
1139 | break; | |
1140 | case ARC_SPACE_OTHER: | |
1141 | ARCSTAT_INCR(arcstat_other_size, -space); | |
1142 | break; | |
1143 | case ARC_SPACE_HDRS: | |
1144 | ARCSTAT_INCR(arcstat_hdr_size, -space); | |
1145 | break; | |
1146 | case ARC_SPACE_L2HDRS: | |
1147 | ARCSTAT_INCR(arcstat_l2_hdr_size, -space); | |
1148 | break; | |
1149 | } | |
1150 | ||
34dc7c2f BB |
1151 | ASSERT(arc_meta_used >= space); |
1152 | if (arc_meta_max < arc_meta_used) | |
1153 | arc_meta_max = arc_meta_used; | |
1154 | atomic_add_64(&arc_meta_used, -space); | |
1155 | ASSERT(arc_size >= space); | |
1156 | atomic_add_64(&arc_size, -space); | |
1157 | } | |
1158 | ||
1159 | void * | |
1160 | arc_data_buf_alloc(uint64_t size) | |
1161 | { | |
1162 | if (arc_evict_needed(ARC_BUFC_DATA)) | |
1163 | cv_signal(&arc_reclaim_thr_cv); | |
1164 | atomic_add_64(&arc_size, size); | |
1165 | return (zio_data_buf_alloc(size)); | |
1166 | } | |
1167 | ||
1168 | void | |
1169 | arc_data_buf_free(void *buf, uint64_t size) | |
1170 | { | |
1171 | zio_data_buf_free(buf, size); | |
1172 | ASSERT(arc_size >= size); | |
1173 | atomic_add_64(&arc_size, -size); | |
1174 | } | |
1175 | ||
1176 | arc_buf_t * | |
1177 | arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) | |
1178 | { | |
1179 | arc_buf_hdr_t *hdr; | |
1180 | arc_buf_t *buf; | |
1181 | ||
1182 | ASSERT3U(size, >, 0); | |
1183 | hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
1184 | ASSERT(BUF_EMPTY(hdr)); | |
1185 | hdr->b_size = size; | |
1186 | hdr->b_type = type; | |
d164b209 | 1187 | hdr->b_spa = spa_guid(spa); |
34dc7c2f BB |
1188 | hdr->b_state = arc_anon; |
1189 | hdr->b_arc_access = 0; | |
1190 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); | |
1191 | buf->b_hdr = hdr; | |
1192 | buf->b_data = NULL; | |
1193 | buf->b_efunc = NULL; | |
1194 | buf->b_private = NULL; | |
1195 | buf->b_next = NULL; | |
1196 | hdr->b_buf = buf; | |
1197 | arc_get_data_buf(buf); | |
1198 | hdr->b_datacnt = 1; | |
1199 | hdr->b_flags = 0; | |
1200 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
1201 | (void) refcount_add(&hdr->b_refcnt, tag); | |
1202 | ||
1203 | return (buf); | |
1204 | } | |
1205 | ||
1206 | static arc_buf_t * | |
1207 | arc_buf_clone(arc_buf_t *from) | |
1208 | { | |
1209 | arc_buf_t *buf; | |
1210 | arc_buf_hdr_t *hdr = from->b_hdr; | |
1211 | uint64_t size = hdr->b_size; | |
1212 | ||
1213 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); | |
1214 | buf->b_hdr = hdr; | |
1215 | buf->b_data = NULL; | |
1216 | buf->b_efunc = NULL; | |
1217 | buf->b_private = NULL; | |
1218 | buf->b_next = hdr->b_buf; | |
1219 | hdr->b_buf = buf; | |
1220 | arc_get_data_buf(buf); | |
1221 | bcopy(from->b_data, buf->b_data, size); | |
1222 | hdr->b_datacnt += 1; | |
1223 | return (buf); | |
1224 | } | |
1225 | ||
1226 | void | |
1227 | arc_buf_add_ref(arc_buf_t *buf, void* tag) | |
1228 | { | |
1229 | arc_buf_hdr_t *hdr; | |
1230 | kmutex_t *hash_lock; | |
1231 | ||
1232 | /* | |
b128c09f BB |
1233 | * Check to see if this buffer is evicted. Callers |
1234 | * must verify b_data != NULL to know if the add_ref | |
1235 | * was successful. | |
34dc7c2f | 1236 | */ |
b128c09f BB |
1237 | rw_enter(&buf->b_lock, RW_READER); |
1238 | if (buf->b_data == NULL) { | |
1239 | rw_exit(&buf->b_lock); | |
34dc7c2f BB |
1240 | return; |
1241 | } | |
b128c09f BB |
1242 | hdr = buf->b_hdr; |
1243 | ASSERT(hdr != NULL); | |
34dc7c2f | 1244 | hash_lock = HDR_LOCK(hdr); |
34dc7c2f | 1245 | mutex_enter(hash_lock); |
b128c09f | 1246 | rw_exit(&buf->b_lock); |
34dc7c2f | 1247 | |
34dc7c2f BB |
1248 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); |
1249 | add_reference(hdr, hash_lock, tag); | |
d164b209 | 1250 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); |
34dc7c2f BB |
1251 | arc_access(hdr, hash_lock); |
1252 | mutex_exit(hash_lock); | |
1253 | ARCSTAT_BUMP(arcstat_hits); | |
1254 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
1255 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
1256 | data, metadata, hits); | |
1257 | } | |
1258 | ||
1259 | /* | |
1260 | * Free the arc data buffer. If it is an l2arc write in progress, | |
1261 | * the buffer is placed on l2arc_free_on_write to be freed later. | |
1262 | */ | |
1263 | static void | |
1264 | arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), | |
1265 | void *data, size_t size) | |
1266 | { | |
1267 | if (HDR_L2_WRITING(hdr)) { | |
1268 | l2arc_data_free_t *df; | |
1269 | df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); | |
1270 | df->l2df_data = data; | |
1271 | df->l2df_size = size; | |
1272 | df->l2df_func = free_func; | |
1273 | mutex_enter(&l2arc_free_on_write_mtx); | |
1274 | list_insert_head(l2arc_free_on_write, df); | |
1275 | mutex_exit(&l2arc_free_on_write_mtx); | |
1276 | ARCSTAT_BUMP(arcstat_l2_free_on_write); | |
1277 | } else { | |
1278 | free_func(data, size); | |
1279 | } | |
1280 | } | |
1281 | ||
1282 | static void | |
1283 | arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) | |
1284 | { | |
1285 | arc_buf_t **bufp; | |
1286 | ||
1287 | /* free up data associated with the buf */ | |
1288 | if (buf->b_data) { | |
1289 | arc_state_t *state = buf->b_hdr->b_state; | |
1290 | uint64_t size = buf->b_hdr->b_size; | |
1291 | arc_buf_contents_t type = buf->b_hdr->b_type; | |
1292 | ||
1293 | arc_cksum_verify(buf); | |
1294 | if (!recycle) { | |
1295 | if (type == ARC_BUFC_METADATA) { | |
1296 | arc_buf_data_free(buf->b_hdr, zio_buf_free, | |
1297 | buf->b_data, size); | |
d164b209 | 1298 | arc_space_return(size, ARC_SPACE_DATA); |
34dc7c2f BB |
1299 | } else { |
1300 | ASSERT(type == ARC_BUFC_DATA); | |
1301 | arc_buf_data_free(buf->b_hdr, | |
1302 | zio_data_buf_free, buf->b_data, size); | |
d164b209 | 1303 | ARCSTAT_INCR(arcstat_data_size, -size); |
34dc7c2f BB |
1304 | atomic_add_64(&arc_size, -size); |
1305 | } | |
1306 | } | |
1307 | if (list_link_active(&buf->b_hdr->b_arc_node)) { | |
1308 | uint64_t *cnt = &state->arcs_lsize[type]; | |
1309 | ||
1310 | ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); | |
1311 | ASSERT(state != arc_anon); | |
1312 | ||
1313 | ASSERT3U(*cnt, >=, size); | |
1314 | atomic_add_64(cnt, -size); | |
1315 | } | |
1316 | ASSERT3U(state->arcs_size, >=, size); | |
1317 | atomic_add_64(&state->arcs_size, -size); | |
1318 | buf->b_data = NULL; | |
1319 | ASSERT(buf->b_hdr->b_datacnt > 0); | |
1320 | buf->b_hdr->b_datacnt -= 1; | |
1321 | } | |
1322 | ||
1323 | /* only remove the buf if requested */ | |
1324 | if (!all) | |
1325 | return; | |
1326 | ||
1327 | /* remove the buf from the hdr list */ | |
1328 | for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) | |
1329 | continue; | |
1330 | *bufp = buf->b_next; | |
1331 | ||
1332 | ASSERT(buf->b_efunc == NULL); | |
1333 | ||
1334 | /* clean up the buf */ | |
1335 | buf->b_hdr = NULL; | |
1336 | kmem_cache_free(buf_cache, buf); | |
1337 | } | |
1338 | ||
1339 | static void | |
1340 | arc_hdr_destroy(arc_buf_hdr_t *hdr) | |
1341 | { | |
1342 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
1343 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
1344 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
b128c09f | 1345 | ASSERT(!(hdr->b_flags & ARC_STORED)); |
34dc7c2f BB |
1346 | |
1347 | if (hdr->b_l2hdr != NULL) { | |
1348 | if (!MUTEX_HELD(&l2arc_buflist_mtx)) { | |
1349 | /* | |
1350 | * To prevent arc_free() and l2arc_evict() from | |
1351 | * attempting to free the same buffer at the same time, | |
1352 | * a FREE_IN_PROGRESS flag is given to arc_free() to | |
1353 | * give it priority. l2arc_evict() can't destroy this | |
1354 | * header while we are waiting on l2arc_buflist_mtx. | |
b128c09f BB |
1355 | * |
1356 | * The hdr may be removed from l2ad_buflist before we | |
1357 | * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. | |
34dc7c2f BB |
1358 | */ |
1359 | mutex_enter(&l2arc_buflist_mtx); | |
b128c09f BB |
1360 | if (hdr->b_l2hdr != NULL) { |
1361 | list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, | |
1362 | hdr); | |
1363 | } | |
34dc7c2f BB |
1364 | mutex_exit(&l2arc_buflist_mtx); |
1365 | } else { | |
1366 | list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); | |
1367 | } | |
1368 | ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); | |
1369 | kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); | |
1370 | if (hdr->b_state == arc_l2c_only) | |
1371 | l2arc_hdr_stat_remove(); | |
1372 | hdr->b_l2hdr = NULL; | |
1373 | } | |
1374 | ||
1375 | if (!BUF_EMPTY(hdr)) { | |
1376 | ASSERT(!HDR_IN_HASH_TABLE(hdr)); | |
1377 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
1378 | hdr->b_birth = 0; | |
1379 | hdr->b_cksum0 = 0; | |
1380 | } | |
1381 | while (hdr->b_buf) { | |
1382 | arc_buf_t *buf = hdr->b_buf; | |
1383 | ||
1384 | if (buf->b_efunc) { | |
1385 | mutex_enter(&arc_eviction_mtx); | |
b128c09f | 1386 | rw_enter(&buf->b_lock, RW_WRITER); |
34dc7c2f BB |
1387 | ASSERT(buf->b_hdr != NULL); |
1388 | arc_buf_destroy(hdr->b_buf, FALSE, FALSE); | |
1389 | hdr->b_buf = buf->b_next; | |
1390 | buf->b_hdr = &arc_eviction_hdr; | |
1391 | buf->b_next = arc_eviction_list; | |
1392 | arc_eviction_list = buf; | |
b128c09f | 1393 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
1394 | mutex_exit(&arc_eviction_mtx); |
1395 | } else { | |
1396 | arc_buf_destroy(hdr->b_buf, FALSE, TRUE); | |
1397 | } | |
1398 | } | |
1399 | if (hdr->b_freeze_cksum != NULL) { | |
1400 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
1401 | hdr->b_freeze_cksum = NULL; | |
1402 | } | |
1403 | ||
1404 | ASSERT(!list_link_active(&hdr->b_arc_node)); | |
1405 | ASSERT3P(hdr->b_hash_next, ==, NULL); | |
1406 | ASSERT3P(hdr->b_acb, ==, NULL); | |
1407 | kmem_cache_free(hdr_cache, hdr); | |
1408 | } | |
1409 | ||
1410 | void | |
1411 | arc_buf_free(arc_buf_t *buf, void *tag) | |
1412 | { | |
1413 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
1414 | int hashed = hdr->b_state != arc_anon; | |
1415 | ||
1416 | ASSERT(buf->b_efunc == NULL); | |
1417 | ASSERT(buf->b_data != NULL); | |
1418 | ||
1419 | if (hashed) { | |
1420 | kmutex_t *hash_lock = HDR_LOCK(hdr); | |
1421 | ||
1422 | mutex_enter(hash_lock); | |
1423 | (void) remove_reference(hdr, hash_lock, tag); | |
1424 | if (hdr->b_datacnt > 1) | |
1425 | arc_buf_destroy(buf, FALSE, TRUE); | |
1426 | else | |
1427 | hdr->b_flags |= ARC_BUF_AVAILABLE; | |
1428 | mutex_exit(hash_lock); | |
1429 | } else if (HDR_IO_IN_PROGRESS(hdr)) { | |
1430 | int destroy_hdr; | |
1431 | /* | |
1432 | * We are in the middle of an async write. Don't destroy | |
1433 | * this buffer unless the write completes before we finish | |
1434 | * decrementing the reference count. | |
1435 | */ | |
1436 | mutex_enter(&arc_eviction_mtx); | |
1437 | (void) remove_reference(hdr, NULL, tag); | |
1438 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
1439 | destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); | |
1440 | mutex_exit(&arc_eviction_mtx); | |
1441 | if (destroy_hdr) | |
1442 | arc_hdr_destroy(hdr); | |
1443 | } else { | |
1444 | if (remove_reference(hdr, NULL, tag) > 0) { | |
1445 | ASSERT(HDR_IO_ERROR(hdr)); | |
1446 | arc_buf_destroy(buf, FALSE, TRUE); | |
1447 | } else { | |
1448 | arc_hdr_destroy(hdr); | |
1449 | } | |
1450 | } | |
1451 | } | |
1452 | ||
1453 | int | |
1454 | arc_buf_remove_ref(arc_buf_t *buf, void* tag) | |
1455 | { | |
1456 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
1457 | kmutex_t *hash_lock = HDR_LOCK(hdr); | |
1458 | int no_callback = (buf->b_efunc == NULL); | |
1459 | ||
1460 | if (hdr->b_state == arc_anon) { | |
1461 | arc_buf_free(buf, tag); | |
1462 | return (no_callback); | |
1463 | } | |
1464 | ||
1465 | mutex_enter(hash_lock); | |
1466 | ASSERT(hdr->b_state != arc_anon); | |
1467 | ASSERT(buf->b_data != NULL); | |
1468 | ||
1469 | (void) remove_reference(hdr, hash_lock, tag); | |
1470 | if (hdr->b_datacnt > 1) { | |
1471 | if (no_callback) | |
1472 | arc_buf_destroy(buf, FALSE, TRUE); | |
1473 | } else if (no_callback) { | |
1474 | ASSERT(hdr->b_buf == buf && buf->b_next == NULL); | |
1475 | hdr->b_flags |= ARC_BUF_AVAILABLE; | |
1476 | } | |
1477 | ASSERT(no_callback || hdr->b_datacnt > 1 || | |
1478 | refcount_is_zero(&hdr->b_refcnt)); | |
1479 | mutex_exit(hash_lock); | |
1480 | return (no_callback); | |
1481 | } | |
1482 | ||
1483 | int | |
1484 | arc_buf_size(arc_buf_t *buf) | |
1485 | { | |
1486 | return (buf->b_hdr->b_size); | |
1487 | } | |
1488 | ||
1489 | /* | |
1490 | * Evict buffers from list until we've removed the specified number of | |
1491 | * bytes. Move the removed buffers to the appropriate evict state. | |
1492 | * If the recycle flag is set, then attempt to "recycle" a buffer: | |
1493 | * - look for a buffer to evict that is `bytes' long. | |
1494 | * - return the data block from this buffer rather than freeing it. | |
1495 | * This flag is used by callers that are trying to make space for a | |
1496 | * new buffer in a full arc cache. | |
1497 | * | |
1498 | * This function makes a "best effort". It skips over any buffers | |
1499 | * it can't get a hash_lock on, and so may not catch all candidates. | |
1500 | * It may also return without evicting as much space as requested. | |
1501 | */ | |
1502 | static void * | |
d164b209 | 1503 | arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, |
34dc7c2f BB |
1504 | arc_buf_contents_t type) |
1505 | { | |
1506 | arc_state_t *evicted_state; | |
1507 | uint64_t bytes_evicted = 0, skipped = 0, missed = 0; | |
1508 | arc_buf_hdr_t *ab, *ab_prev = NULL; | |
1509 | list_t *list = &state->arcs_list[type]; | |
1510 | kmutex_t *hash_lock; | |
1511 | boolean_t have_lock; | |
1512 | void *stolen = NULL; | |
1513 | ||
1514 | ASSERT(state == arc_mru || state == arc_mfu); | |
1515 | ||
1516 | evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
1517 | ||
1518 | mutex_enter(&state->arcs_mtx); | |
1519 | mutex_enter(&evicted_state->arcs_mtx); | |
1520 | ||
1521 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1522 | ab_prev = list_prev(list, ab); | |
1523 | /* prefetch buffers have a minimum lifespan */ | |
1524 | if (HDR_IO_IN_PROGRESS(ab) || | |
1525 | (spa && ab->b_spa != spa) || | |
1526 | (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && | |
1527 | lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { | |
1528 | skipped++; | |
1529 | continue; | |
1530 | } | |
1531 | /* "lookahead" for better eviction candidate */ | |
1532 | if (recycle && ab->b_size != bytes && | |
1533 | ab_prev && ab_prev->b_size == bytes) | |
1534 | continue; | |
1535 | hash_lock = HDR_LOCK(ab); | |
1536 | have_lock = MUTEX_HELD(hash_lock); | |
1537 | if (have_lock || mutex_tryenter(hash_lock)) { | |
1538 | ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); | |
1539 | ASSERT(ab->b_datacnt > 0); | |
1540 | while (ab->b_buf) { | |
1541 | arc_buf_t *buf = ab->b_buf; | |
b128c09f BB |
1542 | if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { |
1543 | missed += 1; | |
1544 | break; | |
1545 | } | |
34dc7c2f BB |
1546 | if (buf->b_data) { |
1547 | bytes_evicted += ab->b_size; | |
1548 | if (recycle && ab->b_type == type && | |
1549 | ab->b_size == bytes && | |
1550 | !HDR_L2_WRITING(ab)) { | |
1551 | stolen = buf->b_data; | |
1552 | recycle = FALSE; | |
1553 | } | |
1554 | } | |
1555 | if (buf->b_efunc) { | |
1556 | mutex_enter(&arc_eviction_mtx); | |
1557 | arc_buf_destroy(buf, | |
1558 | buf->b_data == stolen, FALSE); | |
1559 | ab->b_buf = buf->b_next; | |
1560 | buf->b_hdr = &arc_eviction_hdr; | |
1561 | buf->b_next = arc_eviction_list; | |
1562 | arc_eviction_list = buf; | |
1563 | mutex_exit(&arc_eviction_mtx); | |
b128c09f | 1564 | rw_exit(&buf->b_lock); |
34dc7c2f | 1565 | } else { |
b128c09f | 1566 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
1567 | arc_buf_destroy(buf, |
1568 | buf->b_data == stolen, TRUE); | |
1569 | } | |
1570 | } | |
b128c09f BB |
1571 | if (ab->b_datacnt == 0) { |
1572 | arc_change_state(evicted_state, ab, hash_lock); | |
1573 | ASSERT(HDR_IN_HASH_TABLE(ab)); | |
1574 | ab->b_flags |= ARC_IN_HASH_TABLE; | |
1575 | ab->b_flags &= ~ARC_BUF_AVAILABLE; | |
1576 | DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); | |
1577 | } | |
34dc7c2f BB |
1578 | if (!have_lock) |
1579 | mutex_exit(hash_lock); | |
1580 | if (bytes >= 0 && bytes_evicted >= bytes) | |
1581 | break; | |
1582 | } else { | |
1583 | missed += 1; | |
1584 | } | |
1585 | } | |
1586 | ||
1587 | mutex_exit(&evicted_state->arcs_mtx); | |
1588 | mutex_exit(&state->arcs_mtx); | |
1589 | ||
1590 | if (bytes_evicted < bytes) | |
1591 | dprintf("only evicted %lld bytes from %x", | |
1592 | (longlong_t)bytes_evicted, state); | |
1593 | ||
1594 | if (skipped) | |
1595 | ARCSTAT_INCR(arcstat_evict_skip, skipped); | |
1596 | ||
1597 | if (missed) | |
1598 | ARCSTAT_INCR(arcstat_mutex_miss, missed); | |
1599 | ||
1600 | /* | |
1601 | * We have just evicted some date into the ghost state, make | |
1602 | * sure we also adjust the ghost state size if necessary. | |
1603 | */ | |
1604 | if (arc_no_grow && | |
1605 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { | |
1606 | int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + | |
1607 | arc_mru_ghost->arcs_size - arc_c; | |
1608 | ||
1609 | if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { | |
1610 | int64_t todelete = | |
1611 | MIN(arc_mru_ghost->arcs_lsize[type], mru_over); | |
1612 | arc_evict_ghost(arc_mru_ghost, NULL, todelete); | |
1613 | } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { | |
1614 | int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], | |
1615 | arc_mru_ghost->arcs_size + | |
1616 | arc_mfu_ghost->arcs_size - arc_c); | |
1617 | arc_evict_ghost(arc_mfu_ghost, NULL, todelete); | |
1618 | } | |
1619 | } | |
1620 | ||
1621 | return (stolen); | |
1622 | } | |
1623 | ||
1624 | /* | |
1625 | * Remove buffers from list until we've removed the specified number of | |
1626 | * bytes. Destroy the buffers that are removed. | |
1627 | */ | |
1628 | static void | |
d164b209 | 1629 | arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) |
34dc7c2f BB |
1630 | { |
1631 | arc_buf_hdr_t *ab, *ab_prev; | |
1632 | list_t *list = &state->arcs_list[ARC_BUFC_DATA]; | |
1633 | kmutex_t *hash_lock; | |
1634 | uint64_t bytes_deleted = 0; | |
1635 | uint64_t bufs_skipped = 0; | |
1636 | ||
1637 | ASSERT(GHOST_STATE(state)); | |
1638 | top: | |
1639 | mutex_enter(&state->arcs_mtx); | |
1640 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1641 | ab_prev = list_prev(list, ab); | |
1642 | if (spa && ab->b_spa != spa) | |
1643 | continue; | |
1644 | hash_lock = HDR_LOCK(ab); | |
1645 | if (mutex_tryenter(hash_lock)) { | |
1646 | ASSERT(!HDR_IO_IN_PROGRESS(ab)); | |
1647 | ASSERT(ab->b_buf == NULL); | |
1648 | ARCSTAT_BUMP(arcstat_deleted); | |
1649 | bytes_deleted += ab->b_size; | |
1650 | ||
1651 | if (ab->b_l2hdr != NULL) { | |
1652 | /* | |
1653 | * This buffer is cached on the 2nd Level ARC; | |
1654 | * don't destroy the header. | |
1655 | */ | |
1656 | arc_change_state(arc_l2c_only, ab, hash_lock); | |
1657 | mutex_exit(hash_lock); | |
1658 | } else { | |
1659 | arc_change_state(arc_anon, ab, hash_lock); | |
1660 | mutex_exit(hash_lock); | |
1661 | arc_hdr_destroy(ab); | |
1662 | } | |
1663 | ||
1664 | DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); | |
1665 | if (bytes >= 0 && bytes_deleted >= bytes) | |
1666 | break; | |
1667 | } else { | |
1668 | if (bytes < 0) { | |
1669 | mutex_exit(&state->arcs_mtx); | |
1670 | mutex_enter(hash_lock); | |
1671 | mutex_exit(hash_lock); | |
1672 | goto top; | |
1673 | } | |
1674 | bufs_skipped += 1; | |
1675 | } | |
1676 | } | |
1677 | mutex_exit(&state->arcs_mtx); | |
1678 | ||
1679 | if (list == &state->arcs_list[ARC_BUFC_DATA] && | |
1680 | (bytes < 0 || bytes_deleted < bytes)) { | |
1681 | list = &state->arcs_list[ARC_BUFC_METADATA]; | |
1682 | goto top; | |
1683 | } | |
1684 | ||
1685 | if (bufs_skipped) { | |
1686 | ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); | |
1687 | ASSERT(bytes >= 0); | |
1688 | } | |
1689 | ||
1690 | if (bytes_deleted < bytes) | |
1691 | dprintf("only deleted %lld bytes from %p", | |
1692 | (longlong_t)bytes_deleted, state); | |
1693 | } | |
1694 | ||
1695 | static void | |
1696 | arc_adjust(void) | |
1697 | { | |
d164b209 BB |
1698 | int64_t adjustment, delta; |
1699 | ||
1700 | /* | |
1701 | * Adjust MRU size | |
1702 | */ | |
34dc7c2f | 1703 | |
d164b209 BB |
1704 | adjustment = MIN(arc_size - arc_c, |
1705 | arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - arc_p); | |
34dc7c2f | 1706 | |
d164b209 BB |
1707 | if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { |
1708 | delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); | |
1709 | (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA); | |
1710 | adjustment -= delta; | |
34dc7c2f BB |
1711 | } |
1712 | ||
d164b209 BB |
1713 | if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { |
1714 | delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); | |
1715 | (void) arc_evict(arc_mru, NULL, delta, FALSE, | |
34dc7c2f | 1716 | ARC_BUFC_METADATA); |
34dc7c2f BB |
1717 | } |
1718 | ||
d164b209 BB |
1719 | /* |
1720 | * Adjust MFU size | |
1721 | */ | |
34dc7c2f | 1722 | |
d164b209 BB |
1723 | adjustment = arc_size - arc_c; |
1724 | ||
1725 | if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { | |
1726 | delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); | |
1727 | (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA); | |
1728 | adjustment -= delta; | |
34dc7c2f BB |
1729 | } |
1730 | ||
d164b209 BB |
1731 | if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { |
1732 | int64_t delta = MIN(adjustment, | |
1733 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); | |
1734 | (void) arc_evict(arc_mfu, NULL, delta, FALSE, | |
1735 | ARC_BUFC_METADATA); | |
1736 | } | |
34dc7c2f | 1737 | |
d164b209 BB |
1738 | /* |
1739 | * Adjust ghost lists | |
1740 | */ | |
34dc7c2f | 1741 | |
d164b209 BB |
1742 | adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; |
1743 | ||
1744 | if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { | |
1745 | delta = MIN(arc_mru_ghost->arcs_size, adjustment); | |
1746 | arc_evict_ghost(arc_mru_ghost, NULL, delta); | |
1747 | } | |
34dc7c2f | 1748 | |
d164b209 BB |
1749 | adjustment = |
1750 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; | |
34dc7c2f | 1751 | |
d164b209 BB |
1752 | if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { |
1753 | delta = MIN(arc_mfu_ghost->arcs_size, adjustment); | |
1754 | arc_evict_ghost(arc_mfu_ghost, NULL, delta); | |
34dc7c2f BB |
1755 | } |
1756 | } | |
1757 | ||
1758 | static void | |
1759 | arc_do_user_evicts(void) | |
1760 | { | |
1761 | mutex_enter(&arc_eviction_mtx); | |
1762 | while (arc_eviction_list != NULL) { | |
1763 | arc_buf_t *buf = arc_eviction_list; | |
1764 | arc_eviction_list = buf->b_next; | |
b128c09f | 1765 | rw_enter(&buf->b_lock, RW_WRITER); |
34dc7c2f | 1766 | buf->b_hdr = NULL; |
b128c09f | 1767 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
1768 | mutex_exit(&arc_eviction_mtx); |
1769 | ||
1770 | if (buf->b_efunc != NULL) | |
1771 | VERIFY(buf->b_efunc(buf) == 0); | |
1772 | ||
1773 | buf->b_efunc = NULL; | |
1774 | buf->b_private = NULL; | |
1775 | kmem_cache_free(buf_cache, buf); | |
1776 | mutex_enter(&arc_eviction_mtx); | |
1777 | } | |
1778 | mutex_exit(&arc_eviction_mtx); | |
1779 | } | |
1780 | ||
1781 | /* | |
1782 | * Flush all *evictable* data from the cache for the given spa. | |
1783 | * NOTE: this will not touch "active" (i.e. referenced) data. | |
1784 | */ | |
1785 | void | |
1786 | arc_flush(spa_t *spa) | |
1787 | { | |
d164b209 BB |
1788 | uint64_t guid = 0; |
1789 | ||
1790 | if (spa) | |
1791 | guid = spa_guid(spa); | |
1792 | ||
34dc7c2f | 1793 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { |
d164b209 | 1794 | (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); |
34dc7c2f BB |
1795 | if (spa) |
1796 | break; | |
1797 | } | |
1798 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { | |
d164b209 | 1799 | (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); |
34dc7c2f BB |
1800 | if (spa) |
1801 | break; | |
1802 | } | |
1803 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { | |
d164b209 | 1804 | (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); |
34dc7c2f BB |
1805 | if (spa) |
1806 | break; | |
1807 | } | |
1808 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { | |
d164b209 | 1809 | (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); |
34dc7c2f BB |
1810 | if (spa) |
1811 | break; | |
1812 | } | |
1813 | ||
d164b209 BB |
1814 | arc_evict_ghost(arc_mru_ghost, guid, -1); |
1815 | arc_evict_ghost(arc_mfu_ghost, guid, -1); | |
34dc7c2f BB |
1816 | |
1817 | mutex_enter(&arc_reclaim_thr_lock); | |
1818 | arc_do_user_evicts(); | |
1819 | mutex_exit(&arc_reclaim_thr_lock); | |
1820 | ASSERT(spa || arc_eviction_list == NULL); | |
1821 | } | |
1822 | ||
34dc7c2f BB |
1823 | void |
1824 | arc_shrink(void) | |
1825 | { | |
1826 | if (arc_c > arc_c_min) { | |
1827 | uint64_t to_free; | |
1828 | ||
1829 | #ifdef _KERNEL | |
1830 | to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); | |
1831 | #else | |
1832 | to_free = arc_c >> arc_shrink_shift; | |
1833 | #endif | |
1834 | if (arc_c > arc_c_min + to_free) | |
1835 | atomic_add_64(&arc_c, -to_free); | |
1836 | else | |
1837 | arc_c = arc_c_min; | |
1838 | ||
1839 | atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); | |
1840 | if (arc_c > arc_size) | |
1841 | arc_c = MAX(arc_size, arc_c_min); | |
1842 | if (arc_p > arc_c) | |
1843 | arc_p = (arc_c >> 1); | |
1844 | ASSERT(arc_c >= arc_c_min); | |
1845 | ASSERT((int64_t)arc_p >= 0); | |
1846 | } | |
1847 | ||
1848 | if (arc_size > arc_c) | |
1849 | arc_adjust(); | |
1850 | } | |
1851 | ||
1852 | static int | |
1853 | arc_reclaim_needed(void) | |
1854 | { | |
1855 | uint64_t extra; | |
1856 | ||
1857 | #ifdef _KERNEL | |
1858 | ||
1859 | if (needfree) | |
1860 | return (1); | |
1861 | ||
1862 | /* | |
1863 | * take 'desfree' extra pages, so we reclaim sooner, rather than later | |
1864 | */ | |
1865 | extra = desfree; | |
1866 | ||
1867 | /* | |
1868 | * check that we're out of range of the pageout scanner. It starts to | |
1869 | * schedule paging if freemem is less than lotsfree and needfree. | |
1870 | * lotsfree is the high-water mark for pageout, and needfree is the | |
1871 | * number of needed free pages. We add extra pages here to make sure | |
1872 | * the scanner doesn't start up while we're freeing memory. | |
1873 | */ | |
1874 | if (freemem < lotsfree + needfree + extra) | |
1875 | return (1); | |
1876 | ||
1877 | /* | |
1878 | * check to make sure that swapfs has enough space so that anon | |
1879 | * reservations can still succeed. anon_resvmem() checks that the | |
1880 | * availrmem is greater than swapfs_minfree, and the number of reserved | |
1881 | * swap pages. We also add a bit of extra here just to prevent | |
1882 | * circumstances from getting really dire. | |
1883 | */ | |
1884 | if (availrmem < swapfs_minfree + swapfs_reserve + extra) | |
1885 | return (1); | |
1886 | ||
1887 | #if defined(__i386) | |
1888 | /* | |
1889 | * If we're on an i386 platform, it's possible that we'll exhaust the | |
1890 | * kernel heap space before we ever run out of available physical | |
1891 | * memory. Most checks of the size of the heap_area compare against | |
1892 | * tune.t_minarmem, which is the minimum available real memory that we | |
1893 | * can have in the system. However, this is generally fixed at 25 pages | |
1894 | * which is so low that it's useless. In this comparison, we seek to | |
1895 | * calculate the total heap-size, and reclaim if more than 3/4ths of the | |
1896 | * heap is allocated. (Or, in the calculation, if less than 1/4th is | |
1897 | * free) | |
1898 | */ | |
1899 | if (btop(vmem_size(heap_arena, VMEM_FREE)) < | |
1900 | (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) | |
1901 | return (1); | |
1902 | #endif | |
1903 | ||
1904 | #else | |
1905 | if (spa_get_random(100) == 0) | |
1906 | return (1); | |
1907 | #endif | |
1908 | return (0); | |
1909 | } | |
1910 | ||
1911 | static void | |
1912 | arc_kmem_reap_now(arc_reclaim_strategy_t strat) | |
1913 | { | |
1914 | size_t i; | |
1915 | kmem_cache_t *prev_cache = NULL; | |
1916 | kmem_cache_t *prev_data_cache = NULL; | |
1917 | extern kmem_cache_t *zio_buf_cache[]; | |
1918 | extern kmem_cache_t *zio_data_buf_cache[]; | |
1919 | ||
1920 | #ifdef _KERNEL | |
1921 | if (arc_meta_used >= arc_meta_limit) { | |
1922 | /* | |
1923 | * We are exceeding our meta-data cache limit. | |
1924 | * Purge some DNLC entries to release holds on meta-data. | |
1925 | */ | |
1926 | dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); | |
1927 | } | |
1928 | #if defined(__i386) | |
1929 | /* | |
1930 | * Reclaim unused memory from all kmem caches. | |
1931 | */ | |
1932 | kmem_reap(); | |
1933 | #endif | |
1934 | #endif | |
1935 | ||
1936 | /* | |
1937 | * An aggressive reclamation will shrink the cache size as well as | |
1938 | * reap free buffers from the arc kmem caches. | |
1939 | */ | |
1940 | if (strat == ARC_RECLAIM_AGGR) | |
1941 | arc_shrink(); | |
1942 | ||
1943 | for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { | |
1944 | if (zio_buf_cache[i] != prev_cache) { | |
1945 | prev_cache = zio_buf_cache[i]; | |
1946 | kmem_cache_reap_now(zio_buf_cache[i]); | |
1947 | } | |
1948 | if (zio_data_buf_cache[i] != prev_data_cache) { | |
1949 | prev_data_cache = zio_data_buf_cache[i]; | |
1950 | kmem_cache_reap_now(zio_data_buf_cache[i]); | |
1951 | } | |
1952 | } | |
1953 | kmem_cache_reap_now(buf_cache); | |
1954 | kmem_cache_reap_now(hdr_cache); | |
1955 | } | |
1956 | ||
1957 | static void | |
1958 | arc_reclaim_thread(void) | |
1959 | { | |
1960 | clock_t growtime = 0; | |
1961 | arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; | |
1962 | callb_cpr_t cpr; | |
1963 | ||
1964 | CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); | |
1965 | ||
1966 | mutex_enter(&arc_reclaim_thr_lock); | |
1967 | while (arc_thread_exit == 0) { | |
1968 | if (arc_reclaim_needed()) { | |
1969 | ||
1970 | if (arc_no_grow) { | |
1971 | if (last_reclaim == ARC_RECLAIM_CONS) { | |
1972 | last_reclaim = ARC_RECLAIM_AGGR; | |
1973 | } else { | |
1974 | last_reclaim = ARC_RECLAIM_CONS; | |
1975 | } | |
1976 | } else { | |
1977 | arc_no_grow = TRUE; | |
1978 | last_reclaim = ARC_RECLAIM_AGGR; | |
1979 | membar_producer(); | |
1980 | } | |
1981 | ||
1982 | /* reset the growth delay for every reclaim */ | |
1983 | growtime = lbolt + (arc_grow_retry * hz); | |
1984 | ||
1985 | arc_kmem_reap_now(last_reclaim); | |
b128c09f | 1986 | arc_warm = B_TRUE; |
34dc7c2f BB |
1987 | |
1988 | } else if (arc_no_grow && lbolt >= growtime) { | |
1989 | arc_no_grow = FALSE; | |
1990 | } | |
1991 | ||
1992 | if (2 * arc_c < arc_size + | |
1993 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) | |
1994 | arc_adjust(); | |
1995 | ||
1996 | if (arc_eviction_list != NULL) | |
1997 | arc_do_user_evicts(); | |
1998 | ||
1999 | /* block until needed, or one second, whichever is shorter */ | |
2000 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
2001 | (void) cv_timedwait(&arc_reclaim_thr_cv, | |
2002 | &arc_reclaim_thr_lock, (lbolt + hz)); | |
2003 | CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); | |
2004 | } | |
2005 | ||
2006 | arc_thread_exit = 0; | |
2007 | cv_broadcast(&arc_reclaim_thr_cv); | |
2008 | CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ | |
2009 | thread_exit(); | |
2010 | } | |
2011 | ||
2012 | /* | |
2013 | * Adapt arc info given the number of bytes we are trying to add and | |
2014 | * the state that we are comming from. This function is only called | |
2015 | * when we are adding new content to the cache. | |
2016 | */ | |
2017 | static void | |
2018 | arc_adapt(int bytes, arc_state_t *state) | |
2019 | { | |
2020 | int mult; | |
d164b209 | 2021 | uint64_t arc_p_min = (arc_c >> arc_p_min_shift); |
34dc7c2f BB |
2022 | |
2023 | if (state == arc_l2c_only) | |
2024 | return; | |
2025 | ||
2026 | ASSERT(bytes > 0); | |
2027 | /* | |
2028 | * Adapt the target size of the MRU list: | |
2029 | * - if we just hit in the MRU ghost list, then increase | |
2030 | * the target size of the MRU list. | |
2031 | * - if we just hit in the MFU ghost list, then increase | |
2032 | * the target size of the MFU list by decreasing the | |
2033 | * target size of the MRU list. | |
2034 | */ | |
2035 | if (state == arc_mru_ghost) { | |
2036 | mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? | |
2037 | 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); | |
2038 | ||
d164b209 | 2039 | arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); |
34dc7c2f | 2040 | } else if (state == arc_mfu_ghost) { |
d164b209 BB |
2041 | uint64_t delta; |
2042 | ||
34dc7c2f BB |
2043 | mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? |
2044 | 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); | |
2045 | ||
d164b209 BB |
2046 | delta = MIN(bytes * mult, arc_p); |
2047 | arc_p = MAX(arc_p_min, arc_p - delta); | |
34dc7c2f BB |
2048 | } |
2049 | ASSERT((int64_t)arc_p >= 0); | |
2050 | ||
2051 | if (arc_reclaim_needed()) { | |
2052 | cv_signal(&arc_reclaim_thr_cv); | |
2053 | return; | |
2054 | } | |
2055 | ||
2056 | if (arc_no_grow) | |
2057 | return; | |
2058 | ||
2059 | if (arc_c >= arc_c_max) | |
2060 | return; | |
2061 | ||
2062 | /* | |
2063 | * If we're within (2 * maxblocksize) bytes of the target | |
2064 | * cache size, increment the target cache size | |
2065 | */ | |
2066 | if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { | |
2067 | atomic_add_64(&arc_c, (int64_t)bytes); | |
2068 | if (arc_c > arc_c_max) | |
2069 | arc_c = arc_c_max; | |
2070 | else if (state == arc_anon) | |
2071 | atomic_add_64(&arc_p, (int64_t)bytes); | |
2072 | if (arc_p > arc_c) | |
2073 | arc_p = arc_c; | |
2074 | } | |
2075 | ASSERT((int64_t)arc_p >= 0); | |
2076 | } | |
2077 | ||
2078 | /* | |
2079 | * Check if the cache has reached its limits and eviction is required | |
2080 | * prior to insert. | |
2081 | */ | |
2082 | static int | |
2083 | arc_evict_needed(arc_buf_contents_t type) | |
2084 | { | |
2085 | if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) | |
2086 | return (1); | |
2087 | ||
2088 | #ifdef _KERNEL | |
2089 | /* | |
2090 | * If zio data pages are being allocated out of a separate heap segment, | |
2091 | * then enforce that the size of available vmem for this area remains | |
2092 | * above about 1/32nd free. | |
2093 | */ | |
2094 | if (type == ARC_BUFC_DATA && zio_arena != NULL && | |
2095 | vmem_size(zio_arena, VMEM_FREE) < | |
2096 | (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) | |
2097 | return (1); | |
2098 | #endif | |
2099 | ||
2100 | if (arc_reclaim_needed()) | |
2101 | return (1); | |
2102 | ||
2103 | return (arc_size > arc_c); | |
2104 | } | |
2105 | ||
2106 | /* | |
2107 | * The buffer, supplied as the first argument, needs a data block. | |
2108 | * So, if we are at cache max, determine which cache should be victimized. | |
2109 | * We have the following cases: | |
2110 | * | |
2111 | * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> | |
2112 | * In this situation if we're out of space, but the resident size of the MFU is | |
2113 | * under the limit, victimize the MFU cache to satisfy this insertion request. | |
2114 | * | |
2115 | * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> | |
2116 | * Here, we've used up all of the available space for the MRU, so we need to | |
2117 | * evict from our own cache instead. Evict from the set of resident MRU | |
2118 | * entries. | |
2119 | * | |
2120 | * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> | |
2121 | * c minus p represents the MFU space in the cache, since p is the size of the | |
2122 | * cache that is dedicated to the MRU. In this situation there's still space on | |
2123 | * the MFU side, so the MRU side needs to be victimized. | |
2124 | * | |
2125 | * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> | |
2126 | * MFU's resident set is consuming more space than it has been allotted. In | |
2127 | * this situation, we must victimize our own cache, the MFU, for this insertion. | |
2128 | */ | |
2129 | static void | |
2130 | arc_get_data_buf(arc_buf_t *buf) | |
2131 | { | |
2132 | arc_state_t *state = buf->b_hdr->b_state; | |
2133 | uint64_t size = buf->b_hdr->b_size; | |
2134 | arc_buf_contents_t type = buf->b_hdr->b_type; | |
2135 | ||
2136 | arc_adapt(size, state); | |
2137 | ||
2138 | /* | |
2139 | * We have not yet reached cache maximum size, | |
2140 | * just allocate a new buffer. | |
2141 | */ | |
2142 | if (!arc_evict_needed(type)) { | |
2143 | if (type == ARC_BUFC_METADATA) { | |
2144 | buf->b_data = zio_buf_alloc(size); | |
d164b209 | 2145 | arc_space_consume(size, ARC_SPACE_DATA); |
34dc7c2f BB |
2146 | } else { |
2147 | ASSERT(type == ARC_BUFC_DATA); | |
2148 | buf->b_data = zio_data_buf_alloc(size); | |
d164b209 | 2149 | ARCSTAT_INCR(arcstat_data_size, size); |
34dc7c2f BB |
2150 | atomic_add_64(&arc_size, size); |
2151 | } | |
2152 | goto out; | |
2153 | } | |
2154 | ||
2155 | /* | |
2156 | * If we are prefetching from the mfu ghost list, this buffer | |
2157 | * will end up on the mru list; so steal space from there. | |
2158 | */ | |
2159 | if (state == arc_mfu_ghost) | |
2160 | state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; | |
2161 | else if (state == arc_mru_ghost) | |
2162 | state = arc_mru; | |
2163 | ||
2164 | if (state == arc_mru || state == arc_anon) { | |
2165 | uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; | |
d164b209 | 2166 | state = (arc_mfu->arcs_lsize[type] >= size && |
34dc7c2f BB |
2167 | arc_p > mru_used) ? arc_mfu : arc_mru; |
2168 | } else { | |
2169 | /* MFU cases */ | |
2170 | uint64_t mfu_space = arc_c - arc_p; | |
d164b209 | 2171 | state = (arc_mru->arcs_lsize[type] >= size && |
34dc7c2f BB |
2172 | mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; |
2173 | } | |
2174 | if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { | |
2175 | if (type == ARC_BUFC_METADATA) { | |
2176 | buf->b_data = zio_buf_alloc(size); | |
d164b209 | 2177 | arc_space_consume(size, ARC_SPACE_DATA); |
34dc7c2f BB |
2178 | } else { |
2179 | ASSERT(type == ARC_BUFC_DATA); | |
2180 | buf->b_data = zio_data_buf_alloc(size); | |
d164b209 | 2181 | ARCSTAT_INCR(arcstat_data_size, size); |
34dc7c2f BB |
2182 | atomic_add_64(&arc_size, size); |
2183 | } | |
2184 | ARCSTAT_BUMP(arcstat_recycle_miss); | |
2185 | } | |
2186 | ASSERT(buf->b_data != NULL); | |
2187 | out: | |
2188 | /* | |
2189 | * Update the state size. Note that ghost states have a | |
2190 | * "ghost size" and so don't need to be updated. | |
2191 | */ | |
2192 | if (!GHOST_STATE(buf->b_hdr->b_state)) { | |
2193 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2194 | ||
2195 | atomic_add_64(&hdr->b_state->arcs_size, size); | |
2196 | if (list_link_active(&hdr->b_arc_node)) { | |
2197 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2198 | atomic_add_64(&hdr->b_state->arcs_lsize[type], size); | |
2199 | } | |
2200 | /* | |
2201 | * If we are growing the cache, and we are adding anonymous | |
2202 | * data, and we have outgrown arc_p, update arc_p | |
2203 | */ | |
2204 | if (arc_size < arc_c && hdr->b_state == arc_anon && | |
2205 | arc_anon->arcs_size + arc_mru->arcs_size > arc_p) | |
2206 | arc_p = MIN(arc_c, arc_p + size); | |
2207 | } | |
2208 | } | |
2209 | ||
2210 | /* | |
2211 | * This routine is called whenever a buffer is accessed. | |
2212 | * NOTE: the hash lock is dropped in this function. | |
2213 | */ | |
2214 | static void | |
2215 | arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) | |
2216 | { | |
2217 | ASSERT(MUTEX_HELD(hash_lock)); | |
2218 | ||
2219 | if (buf->b_state == arc_anon) { | |
2220 | /* | |
2221 | * This buffer is not in the cache, and does not | |
2222 | * appear in our "ghost" list. Add the new buffer | |
2223 | * to the MRU state. | |
2224 | */ | |
2225 | ||
2226 | ASSERT(buf->b_arc_access == 0); | |
2227 | buf->b_arc_access = lbolt; | |
2228 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2229 | arc_change_state(arc_mru, buf, hash_lock); | |
2230 | ||
2231 | } else if (buf->b_state == arc_mru) { | |
2232 | /* | |
2233 | * If this buffer is here because of a prefetch, then either: | |
2234 | * - clear the flag if this is a "referencing" read | |
2235 | * (any subsequent access will bump this into the MFU state). | |
2236 | * or | |
2237 | * - move the buffer to the head of the list if this is | |
2238 | * another prefetch (to make it less likely to be evicted). | |
2239 | */ | |
2240 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2241 | if (refcount_count(&buf->b_refcnt) == 0) { | |
2242 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2243 | } else { | |
2244 | buf->b_flags &= ~ARC_PREFETCH; | |
2245 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2246 | } | |
2247 | buf->b_arc_access = lbolt; | |
2248 | return; | |
2249 | } | |
2250 | ||
2251 | /* | |
2252 | * This buffer has been "accessed" only once so far, | |
2253 | * but it is still in the cache. Move it to the MFU | |
2254 | * state. | |
2255 | */ | |
2256 | if (lbolt > buf->b_arc_access + ARC_MINTIME) { | |
2257 | /* | |
2258 | * More than 125ms have passed since we | |
2259 | * instantiated this buffer. Move it to the | |
2260 | * most frequently used state. | |
2261 | */ | |
2262 | buf->b_arc_access = lbolt; | |
2263 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2264 | arc_change_state(arc_mfu, buf, hash_lock); | |
2265 | } | |
2266 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2267 | } else if (buf->b_state == arc_mru_ghost) { | |
2268 | arc_state_t *new_state; | |
2269 | /* | |
2270 | * This buffer has been "accessed" recently, but | |
2271 | * was evicted from the cache. Move it to the | |
2272 | * MFU state. | |
2273 | */ | |
2274 | ||
2275 | if (buf->b_flags & ARC_PREFETCH) { | |
2276 | new_state = arc_mru; | |
2277 | if (refcount_count(&buf->b_refcnt) > 0) | |
2278 | buf->b_flags &= ~ARC_PREFETCH; | |
2279 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2280 | } else { | |
2281 | new_state = arc_mfu; | |
2282 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2283 | } | |
2284 | ||
2285 | buf->b_arc_access = lbolt; | |
2286 | arc_change_state(new_state, buf, hash_lock); | |
2287 | ||
2288 | ARCSTAT_BUMP(arcstat_mru_ghost_hits); | |
2289 | } else if (buf->b_state == arc_mfu) { | |
2290 | /* | |
2291 | * This buffer has been accessed more than once and is | |
2292 | * still in the cache. Keep it in the MFU state. | |
2293 | * | |
2294 | * NOTE: an add_reference() that occurred when we did | |
2295 | * the arc_read() will have kicked this off the list. | |
2296 | * If it was a prefetch, we will explicitly move it to | |
2297 | * the head of the list now. | |
2298 | */ | |
2299 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2300 | ASSERT(refcount_count(&buf->b_refcnt) == 0); | |
2301 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2302 | } | |
2303 | ARCSTAT_BUMP(arcstat_mfu_hits); | |
2304 | buf->b_arc_access = lbolt; | |
2305 | } else if (buf->b_state == arc_mfu_ghost) { | |
2306 | arc_state_t *new_state = arc_mfu; | |
2307 | /* | |
2308 | * This buffer has been accessed more than once but has | |
2309 | * been evicted from the cache. Move it back to the | |
2310 | * MFU state. | |
2311 | */ | |
2312 | ||
2313 | if (buf->b_flags & ARC_PREFETCH) { | |
2314 | /* | |
2315 | * This is a prefetch access... | |
2316 | * move this block back to the MRU state. | |
2317 | */ | |
2318 | ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); | |
2319 | new_state = arc_mru; | |
2320 | } | |
2321 | ||
2322 | buf->b_arc_access = lbolt; | |
2323 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2324 | arc_change_state(new_state, buf, hash_lock); | |
2325 | ||
2326 | ARCSTAT_BUMP(arcstat_mfu_ghost_hits); | |
2327 | } else if (buf->b_state == arc_l2c_only) { | |
2328 | /* | |
2329 | * This buffer is on the 2nd Level ARC. | |
2330 | */ | |
2331 | ||
2332 | buf->b_arc_access = lbolt; | |
2333 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2334 | arc_change_state(arc_mfu, buf, hash_lock); | |
2335 | } else { | |
2336 | ASSERT(!"invalid arc state"); | |
2337 | } | |
2338 | } | |
2339 | ||
2340 | /* a generic arc_done_func_t which you can use */ | |
2341 | /* ARGSUSED */ | |
2342 | void | |
2343 | arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2344 | { | |
2345 | bcopy(buf->b_data, arg, buf->b_hdr->b_size); | |
2346 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2347 | } | |
2348 | ||
2349 | /* a generic arc_done_func_t */ | |
2350 | void | |
2351 | arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2352 | { | |
2353 | arc_buf_t **bufp = arg; | |
2354 | if (zio && zio->io_error) { | |
2355 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2356 | *bufp = NULL; | |
2357 | } else { | |
2358 | *bufp = buf; | |
2359 | } | |
2360 | } | |
2361 | ||
2362 | static void | |
2363 | arc_read_done(zio_t *zio) | |
2364 | { | |
2365 | arc_buf_hdr_t *hdr, *found; | |
2366 | arc_buf_t *buf; | |
2367 | arc_buf_t *abuf; /* buffer we're assigning to callback */ | |
2368 | kmutex_t *hash_lock; | |
2369 | arc_callback_t *callback_list, *acb; | |
2370 | int freeable = FALSE; | |
2371 | ||
2372 | buf = zio->io_private; | |
2373 | hdr = buf->b_hdr; | |
2374 | ||
2375 | /* | |
2376 | * The hdr was inserted into hash-table and removed from lists | |
2377 | * prior to starting I/O. We should find this header, since | |
2378 | * it's in the hash table, and it should be legit since it's | |
2379 | * not possible to evict it during the I/O. The only possible | |
2380 | * reason for it not to be found is if we were freed during the | |
2381 | * read. | |
2382 | */ | |
d164b209 | 2383 | found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, |
34dc7c2f BB |
2384 | &hash_lock); |
2385 | ||
2386 | ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || | |
2387 | (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || | |
2388 | (found == hdr && HDR_L2_READING(hdr))); | |
2389 | ||
b128c09f | 2390 | hdr->b_flags &= ~ARC_L2_EVICTED; |
34dc7c2f | 2391 | if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) |
b128c09f | 2392 | hdr->b_flags &= ~ARC_L2CACHE; |
34dc7c2f BB |
2393 | |
2394 | /* byteswap if necessary */ | |
2395 | callback_list = hdr->b_acb; | |
2396 | ASSERT(callback_list != NULL); | |
b128c09f BB |
2397 | if (BP_SHOULD_BYTESWAP(zio->io_bp)) { |
2398 | arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? | |
2399 | byteswap_uint64_array : | |
2400 | dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; | |
2401 | func(buf->b_data, hdr->b_size); | |
2402 | } | |
34dc7c2f BB |
2403 | |
2404 | arc_cksum_compute(buf, B_FALSE); | |
2405 | ||
2406 | /* create copies of the data buffer for the callers */ | |
2407 | abuf = buf; | |
2408 | for (acb = callback_list; acb; acb = acb->acb_next) { | |
2409 | if (acb->acb_done) { | |
2410 | if (abuf == NULL) | |
2411 | abuf = arc_buf_clone(buf); | |
2412 | acb->acb_buf = abuf; | |
2413 | abuf = NULL; | |
2414 | } | |
2415 | } | |
2416 | hdr->b_acb = NULL; | |
2417 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2418 | ASSERT(!HDR_BUF_AVAILABLE(hdr)); | |
2419 | if (abuf == buf) | |
2420 | hdr->b_flags |= ARC_BUF_AVAILABLE; | |
2421 | ||
2422 | ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); | |
2423 | ||
2424 | if (zio->io_error != 0) { | |
2425 | hdr->b_flags |= ARC_IO_ERROR; | |
2426 | if (hdr->b_state != arc_anon) | |
2427 | arc_change_state(arc_anon, hdr, hash_lock); | |
2428 | if (HDR_IN_HASH_TABLE(hdr)) | |
2429 | buf_hash_remove(hdr); | |
2430 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
34dc7c2f BB |
2431 | } |
2432 | ||
2433 | /* | |
2434 | * Broadcast before we drop the hash_lock to avoid the possibility | |
2435 | * that the hdr (and hence the cv) might be freed before we get to | |
2436 | * the cv_broadcast(). | |
2437 | */ | |
2438 | cv_broadcast(&hdr->b_cv); | |
2439 | ||
2440 | if (hash_lock) { | |
2441 | /* | |
2442 | * Only call arc_access on anonymous buffers. This is because | |
2443 | * if we've issued an I/O for an evicted buffer, we've already | |
2444 | * called arc_access (to prevent any simultaneous readers from | |
2445 | * getting confused). | |
2446 | */ | |
2447 | if (zio->io_error == 0 && hdr->b_state == arc_anon) | |
2448 | arc_access(hdr, hash_lock); | |
2449 | mutex_exit(hash_lock); | |
2450 | } else { | |
2451 | /* | |
2452 | * This block was freed while we waited for the read to | |
2453 | * complete. It has been removed from the hash table and | |
2454 | * moved to the anonymous state (so that it won't show up | |
2455 | * in the cache). | |
2456 | */ | |
2457 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
2458 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
2459 | } | |
2460 | ||
2461 | /* execute each callback and free its structure */ | |
2462 | while ((acb = callback_list) != NULL) { | |
2463 | if (acb->acb_done) | |
2464 | acb->acb_done(zio, acb->acb_buf, acb->acb_private); | |
2465 | ||
2466 | if (acb->acb_zio_dummy != NULL) { | |
2467 | acb->acb_zio_dummy->io_error = zio->io_error; | |
2468 | zio_nowait(acb->acb_zio_dummy); | |
2469 | } | |
2470 | ||
2471 | callback_list = acb->acb_next; | |
2472 | kmem_free(acb, sizeof (arc_callback_t)); | |
2473 | } | |
2474 | ||
2475 | if (freeable) | |
2476 | arc_hdr_destroy(hdr); | |
2477 | } | |
2478 | ||
2479 | /* | |
2480 | * "Read" the block block at the specified DVA (in bp) via the | |
2481 | * cache. If the block is found in the cache, invoke the provided | |
2482 | * callback immediately and return. Note that the `zio' parameter | |
2483 | * in the callback will be NULL in this case, since no IO was | |
2484 | * required. If the block is not in the cache pass the read request | |
2485 | * on to the spa with a substitute callback function, so that the | |
2486 | * requested block will be added to the cache. | |
2487 | * | |
2488 | * If a read request arrives for a block that has a read in-progress, | |
2489 | * either wait for the in-progress read to complete (and return the | |
2490 | * results); or, if this is a read with a "done" func, add a record | |
2491 | * to the read to invoke the "done" func when the read completes, | |
2492 | * and return; or just return. | |
2493 | * | |
2494 | * arc_read_done() will invoke all the requested "done" functions | |
2495 | * for readers of this block. | |
b128c09f BB |
2496 | * |
2497 | * Normal callers should use arc_read and pass the arc buffer and offset | |
2498 | * for the bp. But if you know you don't need locking, you can use | |
2499 | * arc_read_bp. | |
34dc7c2f BB |
2500 | */ |
2501 | int | |
b128c09f BB |
2502 | arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, |
2503 | arc_done_func_t *done, void *private, int priority, int zio_flags, | |
2504 | uint32_t *arc_flags, const zbookmark_t *zb) | |
2505 | { | |
2506 | int err; | |
2507 | arc_buf_hdr_t *hdr = pbuf->b_hdr; | |
2508 | ||
2509 | ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); | |
2510 | ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); | |
2511 | rw_enter(&pbuf->b_lock, RW_READER); | |
2512 | ||
2513 | err = arc_read_nolock(pio, spa, bp, done, private, priority, | |
2514 | zio_flags, arc_flags, zb); | |
2515 | ||
2516 | ASSERT3P(hdr, ==, pbuf->b_hdr); | |
2517 | rw_exit(&pbuf->b_lock); | |
2518 | return (err); | |
2519 | } | |
2520 | ||
2521 | int | |
2522 | arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, | |
2523 | arc_done_func_t *done, void *private, int priority, int zio_flags, | |
2524 | uint32_t *arc_flags, const zbookmark_t *zb) | |
34dc7c2f BB |
2525 | { |
2526 | arc_buf_hdr_t *hdr; | |
2527 | arc_buf_t *buf; | |
2528 | kmutex_t *hash_lock; | |
2529 | zio_t *rzio; | |
d164b209 | 2530 | uint64_t guid = spa_guid(spa); |
34dc7c2f BB |
2531 | |
2532 | top: | |
d164b209 | 2533 | hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); |
34dc7c2f BB |
2534 | if (hdr && hdr->b_datacnt > 0) { |
2535 | ||
2536 | *arc_flags |= ARC_CACHED; | |
2537 | ||
2538 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
2539 | ||
2540 | if (*arc_flags & ARC_WAIT) { | |
2541 | cv_wait(&hdr->b_cv, hash_lock); | |
2542 | mutex_exit(hash_lock); | |
2543 | goto top; | |
2544 | } | |
2545 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2546 | ||
2547 | if (done) { | |
2548 | arc_callback_t *acb = NULL; | |
2549 | ||
2550 | acb = kmem_zalloc(sizeof (arc_callback_t), | |
2551 | KM_SLEEP); | |
2552 | acb->acb_done = done; | |
2553 | acb->acb_private = private; | |
34dc7c2f BB |
2554 | if (pio != NULL) |
2555 | acb->acb_zio_dummy = zio_null(pio, | |
d164b209 | 2556 | spa, NULL, NULL, NULL, zio_flags); |
34dc7c2f BB |
2557 | |
2558 | ASSERT(acb->acb_done != NULL); | |
2559 | acb->acb_next = hdr->b_acb; | |
2560 | hdr->b_acb = acb; | |
2561 | add_reference(hdr, hash_lock, private); | |
2562 | mutex_exit(hash_lock); | |
2563 | return (0); | |
2564 | } | |
2565 | mutex_exit(hash_lock); | |
2566 | return (0); | |
2567 | } | |
2568 | ||
2569 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2570 | ||
2571 | if (done) { | |
2572 | add_reference(hdr, hash_lock, private); | |
2573 | /* | |
2574 | * If this block is already in use, create a new | |
2575 | * copy of the data so that we will be guaranteed | |
2576 | * that arc_release() will always succeed. | |
2577 | */ | |
2578 | buf = hdr->b_buf; | |
2579 | ASSERT(buf); | |
2580 | ASSERT(buf->b_data); | |
2581 | if (HDR_BUF_AVAILABLE(hdr)) { | |
2582 | ASSERT(buf->b_efunc == NULL); | |
2583 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2584 | } else { | |
2585 | buf = arc_buf_clone(buf); | |
2586 | } | |
2587 | } else if (*arc_flags & ARC_PREFETCH && | |
2588 | refcount_count(&hdr->b_refcnt) == 0) { | |
2589 | hdr->b_flags |= ARC_PREFETCH; | |
2590 | } | |
2591 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); | |
2592 | arc_access(hdr, hash_lock); | |
b128c09f BB |
2593 | if (*arc_flags & ARC_L2CACHE) |
2594 | hdr->b_flags |= ARC_L2CACHE; | |
34dc7c2f BB |
2595 | mutex_exit(hash_lock); |
2596 | ARCSTAT_BUMP(arcstat_hits); | |
2597 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2598 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2599 | data, metadata, hits); | |
2600 | ||
2601 | if (done) | |
2602 | done(NULL, buf, private); | |
2603 | } else { | |
2604 | uint64_t size = BP_GET_LSIZE(bp); | |
2605 | arc_callback_t *acb; | |
b128c09f BB |
2606 | vdev_t *vd = NULL; |
2607 | daddr_t addr; | |
d164b209 | 2608 | boolean_t devw = B_FALSE; |
34dc7c2f BB |
2609 | |
2610 | if (hdr == NULL) { | |
2611 | /* this block is not in the cache */ | |
2612 | arc_buf_hdr_t *exists; | |
2613 | arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); | |
2614 | buf = arc_buf_alloc(spa, size, private, type); | |
2615 | hdr = buf->b_hdr; | |
2616 | hdr->b_dva = *BP_IDENTITY(bp); | |
2617 | hdr->b_birth = bp->blk_birth; | |
2618 | hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; | |
2619 | exists = buf_hash_insert(hdr, &hash_lock); | |
2620 | if (exists) { | |
2621 | /* somebody beat us to the hash insert */ | |
2622 | mutex_exit(hash_lock); | |
2623 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
2624 | hdr->b_birth = 0; | |
2625 | hdr->b_cksum0 = 0; | |
2626 | (void) arc_buf_remove_ref(buf, private); | |
2627 | goto top; /* restart the IO request */ | |
2628 | } | |
2629 | /* if this is a prefetch, we don't have a reference */ | |
2630 | if (*arc_flags & ARC_PREFETCH) { | |
2631 | (void) remove_reference(hdr, hash_lock, | |
2632 | private); | |
2633 | hdr->b_flags |= ARC_PREFETCH; | |
2634 | } | |
b128c09f BB |
2635 | if (*arc_flags & ARC_L2CACHE) |
2636 | hdr->b_flags |= ARC_L2CACHE; | |
34dc7c2f BB |
2637 | if (BP_GET_LEVEL(bp) > 0) |
2638 | hdr->b_flags |= ARC_INDIRECT; | |
2639 | } else { | |
2640 | /* this block is in the ghost cache */ | |
2641 | ASSERT(GHOST_STATE(hdr->b_state)); | |
2642 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2643 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); | |
2644 | ASSERT(hdr->b_buf == NULL); | |
2645 | ||
2646 | /* if this is a prefetch, we don't have a reference */ | |
2647 | if (*arc_flags & ARC_PREFETCH) | |
2648 | hdr->b_flags |= ARC_PREFETCH; | |
2649 | else | |
2650 | add_reference(hdr, hash_lock, private); | |
b128c09f BB |
2651 | if (*arc_flags & ARC_L2CACHE) |
2652 | hdr->b_flags |= ARC_L2CACHE; | |
34dc7c2f BB |
2653 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); |
2654 | buf->b_hdr = hdr; | |
2655 | buf->b_data = NULL; | |
2656 | buf->b_efunc = NULL; | |
2657 | buf->b_private = NULL; | |
2658 | buf->b_next = NULL; | |
2659 | hdr->b_buf = buf; | |
2660 | arc_get_data_buf(buf); | |
2661 | ASSERT(hdr->b_datacnt == 0); | |
2662 | hdr->b_datacnt = 1; | |
2663 | ||
2664 | } | |
2665 | ||
2666 | acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); | |
2667 | acb->acb_done = done; | |
2668 | acb->acb_private = private; | |
34dc7c2f BB |
2669 | |
2670 | ASSERT(hdr->b_acb == NULL); | |
2671 | hdr->b_acb = acb; | |
2672 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
2673 | ||
2674 | /* | |
2675 | * If the buffer has been evicted, migrate it to a present state | |
2676 | * before issuing the I/O. Once we drop the hash-table lock, | |
2677 | * the header will be marked as I/O in progress and have an | |
2678 | * attached buffer. At this point, anybody who finds this | |
2679 | * buffer ought to notice that it's legit but has a pending I/O. | |
2680 | */ | |
2681 | ||
2682 | if (GHOST_STATE(hdr->b_state)) | |
2683 | arc_access(hdr, hash_lock); | |
2684 | ||
b128c09f BB |
2685 | if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && |
2686 | (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { | |
d164b209 | 2687 | devw = hdr->b_l2hdr->b_dev->l2ad_writing; |
b128c09f BB |
2688 | addr = hdr->b_l2hdr->b_daddr; |
2689 | /* | |
2690 | * Lock out device removal. | |
2691 | */ | |
2692 | if (vdev_is_dead(vd) || | |
2693 | !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) | |
2694 | vd = NULL; | |
2695 | } | |
2696 | ||
2697 | mutex_exit(hash_lock); | |
2698 | ||
34dc7c2f BB |
2699 | ASSERT3U(hdr->b_size, ==, size); |
2700 | DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, | |
2701 | zbookmark_t *, zb); | |
2702 | ARCSTAT_BUMP(arcstat_misses); | |
2703 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2704 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2705 | data, metadata, misses); | |
2706 | ||
d164b209 | 2707 | if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { |
34dc7c2f BB |
2708 | /* |
2709 | * Read from the L2ARC if the following are true: | |
b128c09f BB |
2710 | * 1. The L2ARC vdev was previously cached. |
2711 | * 2. This buffer still has L2ARC metadata. | |
2712 | * 3. This buffer isn't currently writing to the L2ARC. | |
2713 | * 4. The L2ARC entry wasn't evicted, which may | |
2714 | * also have invalidated the vdev. | |
d164b209 | 2715 | * 5. This isn't prefetch and l2arc_noprefetch is set. |
34dc7c2f | 2716 | */ |
b128c09f | 2717 | if (hdr->b_l2hdr != NULL && |
d164b209 BB |
2718 | !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && |
2719 | !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { | |
34dc7c2f BB |
2720 | l2arc_read_callback_t *cb; |
2721 | ||
2722 | DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); | |
2723 | ARCSTAT_BUMP(arcstat_l2_hits); | |
2724 | ||
34dc7c2f BB |
2725 | cb = kmem_zalloc(sizeof (l2arc_read_callback_t), |
2726 | KM_SLEEP); | |
2727 | cb->l2rcb_buf = buf; | |
2728 | cb->l2rcb_spa = spa; | |
2729 | cb->l2rcb_bp = *bp; | |
2730 | cb->l2rcb_zb = *zb; | |
b128c09f | 2731 | cb->l2rcb_flags = zio_flags; |
34dc7c2f BB |
2732 | |
2733 | /* | |
b128c09f BB |
2734 | * l2arc read. The SCL_L2ARC lock will be |
2735 | * released by l2arc_read_done(). | |
34dc7c2f BB |
2736 | */ |
2737 | rzio = zio_read_phys(pio, vd, addr, size, | |
2738 | buf->b_data, ZIO_CHECKSUM_OFF, | |
b128c09f BB |
2739 | l2arc_read_done, cb, priority, zio_flags | |
2740 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | | |
2741 | ZIO_FLAG_DONT_PROPAGATE | | |
2742 | ZIO_FLAG_DONT_RETRY, B_FALSE); | |
34dc7c2f BB |
2743 | DTRACE_PROBE2(l2arc__read, vdev_t *, vd, |
2744 | zio_t *, rzio); | |
d164b209 | 2745 | ARCSTAT_INCR(arcstat_l2_read_bytes, size); |
34dc7c2f | 2746 | |
b128c09f BB |
2747 | if (*arc_flags & ARC_NOWAIT) { |
2748 | zio_nowait(rzio); | |
2749 | return (0); | |
2750 | } | |
34dc7c2f | 2751 | |
b128c09f BB |
2752 | ASSERT(*arc_flags & ARC_WAIT); |
2753 | if (zio_wait(rzio) == 0) | |
2754 | return (0); | |
2755 | ||
2756 | /* l2arc read error; goto zio_read() */ | |
34dc7c2f BB |
2757 | } else { |
2758 | DTRACE_PROBE1(l2arc__miss, | |
2759 | arc_buf_hdr_t *, hdr); | |
2760 | ARCSTAT_BUMP(arcstat_l2_misses); | |
2761 | if (HDR_L2_WRITING(hdr)) | |
2762 | ARCSTAT_BUMP(arcstat_l2_rw_clash); | |
b128c09f | 2763 | spa_config_exit(spa, SCL_L2ARC, vd); |
34dc7c2f | 2764 | } |
d164b209 BB |
2765 | } else { |
2766 | if (vd != NULL) | |
2767 | spa_config_exit(spa, SCL_L2ARC, vd); | |
2768 | if (l2arc_ndev != 0) { | |
2769 | DTRACE_PROBE1(l2arc__miss, | |
2770 | arc_buf_hdr_t *, hdr); | |
2771 | ARCSTAT_BUMP(arcstat_l2_misses); | |
2772 | } | |
34dc7c2f | 2773 | } |
34dc7c2f BB |
2774 | |
2775 | rzio = zio_read(pio, spa, bp, buf->b_data, size, | |
b128c09f | 2776 | arc_read_done, buf, priority, zio_flags, zb); |
34dc7c2f BB |
2777 | |
2778 | if (*arc_flags & ARC_WAIT) | |
2779 | return (zio_wait(rzio)); | |
2780 | ||
2781 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2782 | zio_nowait(rzio); | |
2783 | } | |
2784 | return (0); | |
2785 | } | |
2786 | ||
2787 | /* | |
2788 | * arc_read() variant to support pool traversal. If the block is already | |
2789 | * in the ARC, make a copy of it; otherwise, the caller will do the I/O. | |
2790 | * The idea is that we don't want pool traversal filling up memory, but | |
2791 | * if the ARC already has the data anyway, we shouldn't pay for the I/O. | |
2792 | */ | |
2793 | int | |
2794 | arc_tryread(spa_t *spa, blkptr_t *bp, void *data) | |
2795 | { | |
2796 | arc_buf_hdr_t *hdr; | |
2797 | kmutex_t *hash_mtx; | |
d164b209 | 2798 | uint64_t guid = spa_guid(spa); |
34dc7c2f BB |
2799 | int rc = 0; |
2800 | ||
d164b209 | 2801 | hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); |
34dc7c2f BB |
2802 | |
2803 | if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { | |
2804 | arc_buf_t *buf = hdr->b_buf; | |
2805 | ||
2806 | ASSERT(buf); | |
2807 | while (buf->b_data == NULL) { | |
2808 | buf = buf->b_next; | |
2809 | ASSERT(buf); | |
2810 | } | |
2811 | bcopy(buf->b_data, data, hdr->b_size); | |
2812 | } else { | |
2813 | rc = ENOENT; | |
2814 | } | |
2815 | ||
2816 | if (hash_mtx) | |
2817 | mutex_exit(hash_mtx); | |
2818 | ||
2819 | return (rc); | |
2820 | } | |
2821 | ||
2822 | void | |
2823 | arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) | |
2824 | { | |
2825 | ASSERT(buf->b_hdr != NULL); | |
2826 | ASSERT(buf->b_hdr->b_state != arc_anon); | |
2827 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); | |
2828 | buf->b_efunc = func; | |
2829 | buf->b_private = private; | |
2830 | } | |
2831 | ||
2832 | /* | |
2833 | * This is used by the DMU to let the ARC know that a buffer is | |
2834 | * being evicted, so the ARC should clean up. If this arc buf | |
2835 | * is not yet in the evicted state, it will be put there. | |
2836 | */ | |
2837 | int | |
2838 | arc_buf_evict(arc_buf_t *buf) | |
2839 | { | |
2840 | arc_buf_hdr_t *hdr; | |
2841 | kmutex_t *hash_lock; | |
2842 | arc_buf_t **bufp; | |
2843 | ||
b128c09f | 2844 | rw_enter(&buf->b_lock, RW_WRITER); |
34dc7c2f BB |
2845 | hdr = buf->b_hdr; |
2846 | if (hdr == NULL) { | |
2847 | /* | |
2848 | * We are in arc_do_user_evicts(). | |
2849 | */ | |
2850 | ASSERT(buf->b_data == NULL); | |
b128c09f | 2851 | rw_exit(&buf->b_lock); |
34dc7c2f | 2852 | return (0); |
b128c09f BB |
2853 | } else if (buf->b_data == NULL) { |
2854 | arc_buf_t copy = *buf; /* structure assignment */ | |
34dc7c2f | 2855 | /* |
b128c09f BB |
2856 | * We are on the eviction list; process this buffer now |
2857 | * but let arc_do_user_evicts() do the reaping. | |
34dc7c2f | 2858 | */ |
b128c09f BB |
2859 | buf->b_efunc = NULL; |
2860 | rw_exit(&buf->b_lock); | |
2861 | VERIFY(copy.b_efunc(©) == 0); | |
2862 | return (1); | |
34dc7c2f | 2863 | } |
b128c09f BB |
2864 | hash_lock = HDR_LOCK(hdr); |
2865 | mutex_enter(hash_lock); | |
34dc7c2f BB |
2866 | |
2867 | ASSERT(buf->b_hdr == hdr); | |
2868 | ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); | |
2869 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2870 | ||
2871 | /* | |
2872 | * Pull this buffer off of the hdr | |
2873 | */ | |
2874 | bufp = &hdr->b_buf; | |
2875 | while (*bufp != buf) | |
2876 | bufp = &(*bufp)->b_next; | |
2877 | *bufp = buf->b_next; | |
2878 | ||
2879 | ASSERT(buf->b_data != NULL); | |
2880 | arc_buf_destroy(buf, FALSE, FALSE); | |
2881 | ||
2882 | if (hdr->b_datacnt == 0) { | |
2883 | arc_state_t *old_state = hdr->b_state; | |
2884 | arc_state_t *evicted_state; | |
2885 | ||
2886 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2887 | ||
2888 | evicted_state = | |
2889 | (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
2890 | ||
2891 | mutex_enter(&old_state->arcs_mtx); | |
2892 | mutex_enter(&evicted_state->arcs_mtx); | |
2893 | ||
2894 | arc_change_state(evicted_state, hdr, hash_lock); | |
2895 | ASSERT(HDR_IN_HASH_TABLE(hdr)); | |
2896 | hdr->b_flags |= ARC_IN_HASH_TABLE; | |
2897 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2898 | ||
2899 | mutex_exit(&evicted_state->arcs_mtx); | |
2900 | mutex_exit(&old_state->arcs_mtx); | |
2901 | } | |
2902 | mutex_exit(hash_lock); | |
b128c09f | 2903 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
2904 | |
2905 | VERIFY(buf->b_efunc(buf) == 0); | |
2906 | buf->b_efunc = NULL; | |
2907 | buf->b_private = NULL; | |
2908 | buf->b_hdr = NULL; | |
2909 | kmem_cache_free(buf_cache, buf); | |
2910 | return (1); | |
2911 | } | |
2912 | ||
2913 | /* | |
2914 | * Release this buffer from the cache. This must be done | |
2915 | * after a read and prior to modifying the buffer contents. | |
2916 | * If the buffer has more than one reference, we must make | |
b128c09f | 2917 | * a new hdr for the buffer. |
34dc7c2f BB |
2918 | */ |
2919 | void | |
2920 | arc_release(arc_buf_t *buf, void *tag) | |
2921 | { | |
b128c09f BB |
2922 | arc_buf_hdr_t *hdr; |
2923 | kmutex_t *hash_lock; | |
2924 | l2arc_buf_hdr_t *l2hdr; | |
34dc7c2f BB |
2925 | uint64_t buf_size; |
2926 | ||
b128c09f BB |
2927 | rw_enter(&buf->b_lock, RW_WRITER); |
2928 | hdr = buf->b_hdr; | |
2929 | ||
34dc7c2f BB |
2930 | /* this buffer is not on any list */ |
2931 | ASSERT(refcount_count(&hdr->b_refcnt) > 0); | |
b128c09f | 2932 | ASSERT(!(hdr->b_flags & ARC_STORED)); |
34dc7c2f BB |
2933 | |
2934 | if (hdr->b_state == arc_anon) { | |
2935 | /* this buffer is already released */ | |
2936 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); | |
2937 | ASSERT(BUF_EMPTY(hdr)); | |
2938 | ASSERT(buf->b_efunc == NULL); | |
2939 | arc_buf_thaw(buf); | |
b128c09f | 2940 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
2941 | return; |
2942 | } | |
2943 | ||
b128c09f | 2944 | hash_lock = HDR_LOCK(hdr); |
34dc7c2f BB |
2945 | mutex_enter(hash_lock); |
2946 | ||
b128c09f BB |
2947 | l2hdr = hdr->b_l2hdr; |
2948 | if (l2hdr) { | |
2949 | mutex_enter(&l2arc_buflist_mtx); | |
2950 | hdr->b_l2hdr = NULL; | |
2951 | buf_size = hdr->b_size; | |
2952 | } | |
2953 | ||
34dc7c2f BB |
2954 | /* |
2955 | * Do we have more than one buf? | |
2956 | */ | |
b128c09f | 2957 | if (hdr->b_datacnt > 1) { |
34dc7c2f BB |
2958 | arc_buf_hdr_t *nhdr; |
2959 | arc_buf_t **bufp; | |
2960 | uint64_t blksz = hdr->b_size; | |
d164b209 | 2961 | uint64_t spa = hdr->b_spa; |
34dc7c2f BB |
2962 | arc_buf_contents_t type = hdr->b_type; |
2963 | uint32_t flags = hdr->b_flags; | |
2964 | ||
b128c09f | 2965 | ASSERT(hdr->b_buf != buf || buf->b_next != NULL); |
34dc7c2f BB |
2966 | /* |
2967 | * Pull the data off of this buf and attach it to | |
2968 | * a new anonymous buf. | |
2969 | */ | |
2970 | (void) remove_reference(hdr, hash_lock, tag); | |
2971 | bufp = &hdr->b_buf; | |
2972 | while (*bufp != buf) | |
2973 | bufp = &(*bufp)->b_next; | |
2974 | *bufp = (*bufp)->b_next; | |
2975 | buf->b_next = NULL; | |
2976 | ||
2977 | ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); | |
2978 | atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); | |
2979 | if (refcount_is_zero(&hdr->b_refcnt)) { | |
2980 | uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; | |
2981 | ASSERT3U(*size, >=, hdr->b_size); | |
2982 | atomic_add_64(size, -hdr->b_size); | |
2983 | } | |
2984 | hdr->b_datacnt -= 1; | |
34dc7c2f BB |
2985 | arc_cksum_verify(buf); |
2986 | ||
2987 | mutex_exit(hash_lock); | |
2988 | ||
2989 | nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
2990 | nhdr->b_size = blksz; | |
2991 | nhdr->b_spa = spa; | |
2992 | nhdr->b_type = type; | |
2993 | nhdr->b_buf = buf; | |
2994 | nhdr->b_state = arc_anon; | |
2995 | nhdr->b_arc_access = 0; | |
2996 | nhdr->b_flags = flags & ARC_L2_WRITING; | |
2997 | nhdr->b_l2hdr = NULL; | |
2998 | nhdr->b_datacnt = 1; | |
2999 | nhdr->b_freeze_cksum = NULL; | |
3000 | (void) refcount_add(&nhdr->b_refcnt, tag); | |
3001 | buf->b_hdr = nhdr; | |
b128c09f | 3002 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
3003 | atomic_add_64(&arc_anon->arcs_size, blksz); |
3004 | } else { | |
b128c09f | 3005 | rw_exit(&buf->b_lock); |
34dc7c2f BB |
3006 | ASSERT(refcount_count(&hdr->b_refcnt) == 1); |
3007 | ASSERT(!list_link_active(&hdr->b_arc_node)); | |
3008 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
3009 | arc_change_state(arc_anon, hdr, hash_lock); | |
3010 | hdr->b_arc_access = 0; | |
34dc7c2f BB |
3011 | mutex_exit(hash_lock); |
3012 | ||
3013 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
3014 | hdr->b_birth = 0; | |
3015 | hdr->b_cksum0 = 0; | |
3016 | arc_buf_thaw(buf); | |
3017 | } | |
3018 | buf->b_efunc = NULL; | |
3019 | buf->b_private = NULL; | |
3020 | ||
3021 | if (l2hdr) { | |
3022 | list_remove(l2hdr->b_dev->l2ad_buflist, hdr); | |
3023 | kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); | |
3024 | ARCSTAT_INCR(arcstat_l2_size, -buf_size); | |
34dc7c2f | 3025 | mutex_exit(&l2arc_buflist_mtx); |
b128c09f | 3026 | } |
34dc7c2f BB |
3027 | } |
3028 | ||
3029 | int | |
3030 | arc_released(arc_buf_t *buf) | |
3031 | { | |
b128c09f BB |
3032 | int released; |
3033 | ||
3034 | rw_enter(&buf->b_lock, RW_READER); | |
3035 | released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); | |
3036 | rw_exit(&buf->b_lock); | |
3037 | return (released); | |
34dc7c2f BB |
3038 | } |
3039 | ||
3040 | int | |
3041 | arc_has_callback(arc_buf_t *buf) | |
3042 | { | |
b128c09f BB |
3043 | int callback; |
3044 | ||
3045 | rw_enter(&buf->b_lock, RW_READER); | |
3046 | callback = (buf->b_efunc != NULL); | |
3047 | rw_exit(&buf->b_lock); | |
3048 | return (callback); | |
34dc7c2f BB |
3049 | } |
3050 | ||
3051 | #ifdef ZFS_DEBUG | |
3052 | int | |
3053 | arc_referenced(arc_buf_t *buf) | |
3054 | { | |
b128c09f BB |
3055 | int referenced; |
3056 | ||
3057 | rw_enter(&buf->b_lock, RW_READER); | |
3058 | referenced = (refcount_count(&buf->b_hdr->b_refcnt)); | |
3059 | rw_exit(&buf->b_lock); | |
3060 | return (referenced); | |
34dc7c2f BB |
3061 | } |
3062 | #endif | |
3063 | ||
3064 | static void | |
3065 | arc_write_ready(zio_t *zio) | |
3066 | { | |
3067 | arc_write_callback_t *callback = zio->io_private; | |
3068 | arc_buf_t *buf = callback->awcb_buf; | |
3069 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3070 | ||
b128c09f BB |
3071 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); |
3072 | callback->awcb_ready(zio, buf, callback->awcb_private); | |
3073 | ||
34dc7c2f BB |
3074 | /* |
3075 | * If the IO is already in progress, then this is a re-write | |
b128c09f BB |
3076 | * attempt, so we need to thaw and re-compute the cksum. |
3077 | * It is the responsibility of the callback to handle the | |
3078 | * accounting for any re-write attempt. | |
34dc7c2f BB |
3079 | */ |
3080 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
34dc7c2f BB |
3081 | mutex_enter(&hdr->b_freeze_lock); |
3082 | if (hdr->b_freeze_cksum != NULL) { | |
3083 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
3084 | hdr->b_freeze_cksum = NULL; | |
3085 | } | |
3086 | mutex_exit(&hdr->b_freeze_lock); | |
3087 | } | |
3088 | arc_cksum_compute(buf, B_FALSE); | |
3089 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
3090 | } | |
3091 | ||
3092 | static void | |
3093 | arc_write_done(zio_t *zio) | |
3094 | { | |
3095 | arc_write_callback_t *callback = zio->io_private; | |
3096 | arc_buf_t *buf = callback->awcb_buf; | |
3097 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3098 | ||
3099 | hdr->b_acb = NULL; | |
3100 | ||
34dc7c2f BB |
3101 | hdr->b_dva = *BP_IDENTITY(zio->io_bp); |
3102 | hdr->b_birth = zio->io_bp->blk_birth; | |
3103 | hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; | |
3104 | /* | |
3105 | * If the block to be written was all-zero, we may have | |
3106 | * compressed it away. In this case no write was performed | |
3107 | * so there will be no dva/birth-date/checksum. The buffer | |
3108 | * must therefor remain anonymous (and uncached). | |
3109 | */ | |
3110 | if (!BUF_EMPTY(hdr)) { | |
3111 | arc_buf_hdr_t *exists; | |
3112 | kmutex_t *hash_lock; | |
3113 | ||
3114 | arc_cksum_verify(buf); | |
3115 | ||
3116 | exists = buf_hash_insert(hdr, &hash_lock); | |
3117 | if (exists) { | |
3118 | /* | |
3119 | * This can only happen if we overwrite for | |
3120 | * sync-to-convergence, because we remove | |
3121 | * buffers from the hash table when we arc_free(). | |
3122 | */ | |
b128c09f | 3123 | ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); |
34dc7c2f BB |
3124 | ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), |
3125 | BP_IDENTITY(zio->io_bp))); | |
3126 | ASSERT3U(zio->io_bp_orig.blk_birth, ==, | |
3127 | zio->io_bp->blk_birth); | |
3128 | ||
3129 | ASSERT(refcount_is_zero(&exists->b_refcnt)); | |
3130 | arc_change_state(arc_anon, exists, hash_lock); | |
3131 | mutex_exit(hash_lock); | |
3132 | arc_hdr_destroy(exists); | |
3133 | exists = buf_hash_insert(hdr, &hash_lock); | |
3134 | ASSERT3P(exists, ==, NULL); | |
3135 | } | |
3136 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
b128c09f BB |
3137 | /* if it's not anon, we are doing a scrub */ |
3138 | if (hdr->b_state == arc_anon) | |
3139 | arc_access(hdr, hash_lock); | |
34dc7c2f BB |
3140 | mutex_exit(hash_lock); |
3141 | } else if (callback->awcb_done == NULL) { | |
3142 | int destroy_hdr; | |
3143 | /* | |
3144 | * This is an anonymous buffer with no user callback, | |
3145 | * destroy it if there are no active references. | |
3146 | */ | |
3147 | mutex_enter(&arc_eviction_mtx); | |
3148 | destroy_hdr = refcount_is_zero(&hdr->b_refcnt); | |
3149 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
3150 | mutex_exit(&arc_eviction_mtx); | |
3151 | if (destroy_hdr) | |
3152 | arc_hdr_destroy(hdr); | |
3153 | } else { | |
3154 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
3155 | } | |
b128c09f | 3156 | hdr->b_flags &= ~ARC_STORED; |
34dc7c2f BB |
3157 | |
3158 | if (callback->awcb_done) { | |
3159 | ASSERT(!refcount_is_zero(&hdr->b_refcnt)); | |
3160 | callback->awcb_done(zio, buf, callback->awcb_private); | |
3161 | } | |
3162 | ||
3163 | kmem_free(callback, sizeof (arc_write_callback_t)); | |
3164 | } | |
3165 | ||
b128c09f BB |
3166 | void |
3167 | write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) | |
3168 | { | |
3169 | boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); | |
3170 | ||
3171 | /* Determine checksum setting */ | |
3172 | if (ismd) { | |
3173 | /* | |
3174 | * Metadata always gets checksummed. If the data | |
3175 | * checksum is multi-bit correctable, and it's not a | |
3176 | * ZBT-style checksum, then it's suitable for metadata | |
3177 | * as well. Otherwise, the metadata checksum defaults | |
3178 | * to fletcher4. | |
3179 | */ | |
3180 | if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && | |
3181 | !zio_checksum_table[wp->wp_oschecksum].ci_zbt) | |
3182 | zp->zp_checksum = wp->wp_oschecksum; | |
3183 | else | |
3184 | zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; | |
3185 | } else { | |
3186 | zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, | |
3187 | wp->wp_oschecksum); | |
3188 | } | |
3189 | ||
3190 | /* Determine compression setting */ | |
3191 | if (ismd) { | |
3192 | /* | |
3193 | * XXX -- we should design a compression algorithm | |
3194 | * that specializes in arrays of bps. | |
3195 | */ | |
3196 | zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : | |
3197 | ZIO_COMPRESS_LZJB; | |
3198 | } else { | |
3199 | zp->zp_compress = zio_compress_select(wp->wp_dncompress, | |
3200 | wp->wp_oscompress); | |
3201 | } | |
3202 | ||
3203 | zp->zp_type = wp->wp_type; | |
3204 | zp->zp_level = wp->wp_level; | |
3205 | zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); | |
3206 | } | |
3207 | ||
34dc7c2f | 3208 | zio_t * |
b128c09f BB |
3209 | arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, |
3210 | boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, | |
34dc7c2f | 3211 | arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, |
b128c09f | 3212 | int zio_flags, const zbookmark_t *zb) |
34dc7c2f BB |
3213 | { |
3214 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3215 | arc_write_callback_t *callback; | |
b128c09f BB |
3216 | zio_t *zio; |
3217 | zio_prop_t zp; | |
34dc7c2f | 3218 | |
b128c09f | 3219 | ASSERT(ready != NULL); |
34dc7c2f BB |
3220 | ASSERT(!HDR_IO_ERROR(hdr)); |
3221 | ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); | |
3222 | ASSERT(hdr->b_acb == 0); | |
b128c09f BB |
3223 | if (l2arc) |
3224 | hdr->b_flags |= ARC_L2CACHE; | |
34dc7c2f BB |
3225 | callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); |
3226 | callback->awcb_ready = ready; | |
3227 | callback->awcb_done = done; | |
3228 | callback->awcb_private = private; | |
3229 | callback->awcb_buf = buf; | |
b128c09f BB |
3230 | |
3231 | write_policy(spa, wp, &zp); | |
3232 | zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, | |
3233 | arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); | |
34dc7c2f BB |
3234 | |
3235 | return (zio); | |
3236 | } | |
3237 | ||
3238 | int | |
3239 | arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, | |
3240 | zio_done_func_t *done, void *private, uint32_t arc_flags) | |
3241 | { | |
3242 | arc_buf_hdr_t *ab; | |
3243 | kmutex_t *hash_lock; | |
3244 | zio_t *zio; | |
d164b209 | 3245 | uint64_t guid = spa_guid(spa); |
34dc7c2f BB |
3246 | |
3247 | /* | |
3248 | * If this buffer is in the cache, release it, so it | |
3249 | * can be re-used. | |
3250 | */ | |
d164b209 | 3251 | ab = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); |
34dc7c2f BB |
3252 | if (ab != NULL) { |
3253 | /* | |
3254 | * The checksum of blocks to free is not always | |
3255 | * preserved (eg. on the deadlist). However, if it is | |
3256 | * nonzero, it should match what we have in the cache. | |
3257 | */ | |
3258 | ASSERT(bp->blk_cksum.zc_word[0] == 0 || | |
b128c09f BB |
3259 | bp->blk_cksum.zc_word[0] == ab->b_cksum0 || |
3260 | bp->blk_fill == BLK_FILL_ALREADY_FREED); | |
3261 | ||
34dc7c2f BB |
3262 | if (ab->b_state != arc_anon) |
3263 | arc_change_state(arc_anon, ab, hash_lock); | |
3264 | if (HDR_IO_IN_PROGRESS(ab)) { | |
3265 | /* | |
3266 | * This should only happen when we prefetch. | |
3267 | */ | |
3268 | ASSERT(ab->b_flags & ARC_PREFETCH); | |
3269 | ASSERT3U(ab->b_datacnt, ==, 1); | |
3270 | ab->b_flags |= ARC_FREED_IN_READ; | |
3271 | if (HDR_IN_HASH_TABLE(ab)) | |
3272 | buf_hash_remove(ab); | |
3273 | ab->b_arc_access = 0; | |
3274 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3275 | ab->b_birth = 0; | |
3276 | ab->b_cksum0 = 0; | |
3277 | ab->b_buf->b_efunc = NULL; | |
3278 | ab->b_buf->b_private = NULL; | |
3279 | mutex_exit(hash_lock); | |
3280 | } else if (refcount_is_zero(&ab->b_refcnt)) { | |
3281 | ab->b_flags |= ARC_FREE_IN_PROGRESS; | |
3282 | mutex_exit(hash_lock); | |
3283 | arc_hdr_destroy(ab); | |
3284 | ARCSTAT_BUMP(arcstat_deleted); | |
3285 | } else { | |
3286 | /* | |
3287 | * We still have an active reference on this | |
3288 | * buffer. This can happen, e.g., from | |
3289 | * dbuf_unoverride(). | |
3290 | */ | |
3291 | ASSERT(!HDR_IN_HASH_TABLE(ab)); | |
3292 | ab->b_arc_access = 0; | |
3293 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3294 | ab->b_birth = 0; | |
3295 | ab->b_cksum0 = 0; | |
3296 | ab->b_buf->b_efunc = NULL; | |
3297 | ab->b_buf->b_private = NULL; | |
3298 | mutex_exit(hash_lock); | |
3299 | } | |
3300 | } | |
3301 | ||
b128c09f | 3302 | zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); |
34dc7c2f BB |
3303 | |
3304 | if (arc_flags & ARC_WAIT) | |
3305 | return (zio_wait(zio)); | |
3306 | ||
3307 | ASSERT(arc_flags & ARC_NOWAIT); | |
3308 | zio_nowait(zio); | |
3309 | ||
3310 | return (0); | |
3311 | } | |
3312 | ||
3313 | static int | |
3314 | arc_memory_throttle(uint64_t reserve, uint64_t txg) | |
3315 | { | |
3316 | #ifdef _KERNEL | |
3317 | uint64_t inflight_data = arc_anon->arcs_size; | |
3318 | uint64_t available_memory = ptob(freemem); | |
3319 | static uint64_t page_load = 0; | |
3320 | static uint64_t last_txg = 0; | |
3321 | ||
3322 | #if defined(__i386) | |
3323 | available_memory = | |
3324 | MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); | |
3325 | #endif | |
3326 | if (available_memory >= zfs_write_limit_max) | |
3327 | return (0); | |
3328 | ||
3329 | if (txg > last_txg) { | |
3330 | last_txg = txg; | |
3331 | page_load = 0; | |
3332 | } | |
3333 | /* | |
3334 | * If we are in pageout, we know that memory is already tight, | |
3335 | * the arc is already going to be evicting, so we just want to | |
3336 | * continue to let page writes occur as quickly as possible. | |
3337 | */ | |
3338 | if (curproc == proc_pageout) { | |
3339 | if (page_load > MAX(ptob(minfree), available_memory) / 4) | |
3340 | return (ERESTART); | |
3341 | /* Note: reserve is inflated, so we deflate */ | |
3342 | page_load += reserve / 8; | |
3343 | return (0); | |
3344 | } else if (page_load > 0 && arc_reclaim_needed()) { | |
3345 | /* memory is low, delay before restarting */ | |
3346 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3347 | return (EAGAIN); | |
3348 | } | |
3349 | page_load = 0; | |
3350 | ||
3351 | if (arc_size > arc_c_min) { | |
3352 | uint64_t evictable_memory = | |
3353 | arc_mru->arcs_lsize[ARC_BUFC_DATA] + | |
3354 | arc_mru->arcs_lsize[ARC_BUFC_METADATA] + | |
3355 | arc_mfu->arcs_lsize[ARC_BUFC_DATA] + | |
3356 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; | |
3357 | available_memory += MIN(evictable_memory, arc_size - arc_c_min); | |
3358 | } | |
3359 | ||
3360 | if (inflight_data > available_memory / 4) { | |
3361 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3362 | return (ERESTART); | |
3363 | } | |
3364 | #endif | |
3365 | return (0); | |
3366 | } | |
3367 | ||
3368 | void | |
3369 | arc_tempreserve_clear(uint64_t reserve) | |
3370 | { | |
3371 | atomic_add_64(&arc_tempreserve, -reserve); | |
3372 | ASSERT((int64_t)arc_tempreserve >= 0); | |
3373 | } | |
3374 | ||
3375 | int | |
3376 | arc_tempreserve_space(uint64_t reserve, uint64_t txg) | |
3377 | { | |
3378 | int error; | |
3379 | ||
3380 | #ifdef ZFS_DEBUG | |
3381 | /* | |
3382 | * Once in a while, fail for no reason. Everything should cope. | |
3383 | */ | |
3384 | if (spa_get_random(10000) == 0) { | |
3385 | dprintf("forcing random failure\n"); | |
3386 | return (ERESTART); | |
3387 | } | |
3388 | #endif | |
3389 | if (reserve > arc_c/4 && !arc_no_grow) | |
3390 | arc_c = MIN(arc_c_max, reserve * 4); | |
3391 | if (reserve > arc_c) | |
3392 | return (ENOMEM); | |
3393 | ||
3394 | /* | |
3395 | * Writes will, almost always, require additional memory allocations | |
3396 | * in order to compress/encrypt/etc the data. We therefor need to | |
3397 | * make sure that there is sufficient available memory for this. | |
3398 | */ | |
3399 | if (error = arc_memory_throttle(reserve, txg)) | |
3400 | return (error); | |
3401 | ||
3402 | /* | |
3403 | * Throttle writes when the amount of dirty data in the cache | |
3404 | * gets too large. We try to keep the cache less than half full | |
3405 | * of dirty blocks so that our sync times don't grow too large. | |
3406 | * Note: if two requests come in concurrently, we might let them | |
3407 | * both succeed, when one of them should fail. Not a huge deal. | |
3408 | */ | |
3409 | if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && | |
3410 | arc_anon->arcs_size > arc_c / 4) { | |
3411 | dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " | |
3412 | "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", | |
3413 | arc_tempreserve>>10, | |
3414 | arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, | |
3415 | arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, | |
3416 | reserve>>10, arc_c>>10); | |
3417 | return (ERESTART); | |
3418 | } | |
3419 | atomic_add_64(&arc_tempreserve, reserve); | |
3420 | return (0); | |
3421 | } | |
3422 | ||
3423 | void | |
3424 | arc_init(void) | |
3425 | { | |
3426 | mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
3427 | cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); | |
3428 | ||
3429 | /* Convert seconds to clock ticks */ | |
3430 | arc_min_prefetch_lifespan = 1 * hz; | |
3431 | ||
3432 | /* Start out with 1/8 of all memory */ | |
3433 | arc_c = physmem * PAGESIZE / 8; | |
3434 | ||
3435 | #ifdef _KERNEL | |
3436 | /* | |
3437 | * On architectures where the physical memory can be larger | |
3438 | * than the addressable space (intel in 32-bit mode), we may | |
3439 | * need to limit the cache to 1/8 of VM size. | |
3440 | */ | |
3441 | arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); | |
3442 | #endif | |
3443 | ||
3444 | /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ | |
3445 | arc_c_min = MAX(arc_c / 4, 64<<20); | |
3446 | /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ | |
3447 | if (arc_c * 8 >= 1<<30) | |
3448 | arc_c_max = (arc_c * 8) - (1<<30); | |
3449 | else | |
3450 | arc_c_max = arc_c_min; | |
3451 | arc_c_max = MAX(arc_c * 6, arc_c_max); | |
3452 | ||
3453 | /* | |
3454 | * Allow the tunables to override our calculations if they are | |
3455 | * reasonable (ie. over 64MB) | |
3456 | */ | |
3457 | if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) | |
3458 | arc_c_max = zfs_arc_max; | |
3459 | if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) | |
3460 | arc_c_min = zfs_arc_min; | |
3461 | ||
3462 | arc_c = arc_c_max; | |
3463 | arc_p = (arc_c >> 1); | |
3464 | ||
3465 | /* limit meta-data to 1/4 of the arc capacity */ | |
3466 | arc_meta_limit = arc_c_max / 4; | |
3467 | ||
3468 | /* Allow the tunable to override if it is reasonable */ | |
3469 | if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) | |
3470 | arc_meta_limit = zfs_arc_meta_limit; | |
3471 | ||
3472 | if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) | |
3473 | arc_c_min = arc_meta_limit / 2; | |
3474 | ||
d164b209 BB |
3475 | if (zfs_arc_grow_retry > 0) |
3476 | arc_grow_retry = zfs_arc_grow_retry; | |
3477 | ||
3478 | if (zfs_arc_shrink_shift > 0) | |
3479 | arc_shrink_shift = zfs_arc_shrink_shift; | |
3480 | ||
3481 | if (zfs_arc_p_min_shift > 0) | |
3482 | arc_p_min_shift = zfs_arc_p_min_shift; | |
3483 | ||
34dc7c2f BB |
3484 | /* if kmem_flags are set, lets try to use less memory */ |
3485 | if (kmem_debugging()) | |
3486 | arc_c = arc_c / 2; | |
3487 | if (arc_c < arc_c_min) | |
3488 | arc_c = arc_c_min; | |
3489 | ||
3490 | arc_anon = &ARC_anon; | |
3491 | arc_mru = &ARC_mru; | |
3492 | arc_mru_ghost = &ARC_mru_ghost; | |
3493 | arc_mfu = &ARC_mfu; | |
3494 | arc_mfu_ghost = &ARC_mfu_ghost; | |
3495 | arc_l2c_only = &ARC_l2c_only; | |
3496 | arc_size = 0; | |
3497 | ||
3498 | mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3499 | mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3500 | mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3501 | mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3502 | mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3503 | mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3504 | ||
3505 | list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], | |
3506 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3507 | list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], | |
3508 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3509 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], | |
3510 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3511 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], | |
3512 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3513 | list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], | |
3514 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3515 | list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], | |
3516 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3517 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], | |
3518 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3519 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], | |
3520 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3521 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], | |
3522 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3523 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], | |
3524 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3525 | ||
3526 | buf_init(); | |
3527 | ||
3528 | arc_thread_exit = 0; | |
3529 | arc_eviction_list = NULL; | |
3530 | mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3531 | bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); | |
3532 | ||
3533 | arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, | |
3534 | sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); | |
3535 | ||
3536 | if (arc_ksp != NULL) { | |
3537 | arc_ksp->ks_data = &arc_stats; | |
3538 | kstat_install(arc_ksp); | |
3539 | } | |
3540 | ||
3541 | (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, | |
3542 | TS_RUN, minclsyspri); | |
3543 | ||
3544 | arc_dead = FALSE; | |
b128c09f | 3545 | arc_warm = B_FALSE; |
34dc7c2f BB |
3546 | |
3547 | if (zfs_write_limit_max == 0) | |
b128c09f | 3548 | zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; |
34dc7c2f BB |
3549 | else |
3550 | zfs_write_limit_shift = 0; | |
b128c09f | 3551 | mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); |
34dc7c2f BB |
3552 | } |
3553 | ||
3554 | void | |
3555 | arc_fini(void) | |
3556 | { | |
3557 | mutex_enter(&arc_reclaim_thr_lock); | |
3558 | arc_thread_exit = 1; | |
3559 | while (arc_thread_exit != 0) | |
3560 | cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); | |
3561 | mutex_exit(&arc_reclaim_thr_lock); | |
3562 | ||
3563 | arc_flush(NULL); | |
3564 | ||
3565 | arc_dead = TRUE; | |
3566 | ||
3567 | if (arc_ksp != NULL) { | |
3568 | kstat_delete(arc_ksp); | |
3569 | arc_ksp = NULL; | |
3570 | } | |
3571 | ||
3572 | mutex_destroy(&arc_eviction_mtx); | |
3573 | mutex_destroy(&arc_reclaim_thr_lock); | |
3574 | cv_destroy(&arc_reclaim_thr_cv); | |
3575 | ||
3576 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); | |
3577 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3578 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); | |
3579 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3580 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); | |
3581 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); | |
3582 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); | |
3583 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); | |
3584 | ||
3585 | mutex_destroy(&arc_anon->arcs_mtx); | |
3586 | mutex_destroy(&arc_mru->arcs_mtx); | |
3587 | mutex_destroy(&arc_mru_ghost->arcs_mtx); | |
3588 | mutex_destroy(&arc_mfu->arcs_mtx); | |
3589 | mutex_destroy(&arc_mfu_ghost->arcs_mtx); | |
fb5f0bc8 | 3590 | mutex_destroy(&arc_l2c_only->arcs_mtx); |
34dc7c2f | 3591 | |
b128c09f BB |
3592 | mutex_destroy(&zfs_write_limit_lock); |
3593 | ||
34dc7c2f BB |
3594 | buf_fini(); |
3595 | } | |
3596 | ||
3597 | /* | |
3598 | * Level 2 ARC | |
3599 | * | |
3600 | * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. | |
3601 | * It uses dedicated storage devices to hold cached data, which are populated | |
3602 | * using large infrequent writes. The main role of this cache is to boost | |
3603 | * the performance of random read workloads. The intended L2ARC devices | |
3604 | * include short-stroked disks, solid state disks, and other media with | |
3605 | * substantially faster read latency than disk. | |
3606 | * | |
3607 | * +-----------------------+ | |
3608 | * | ARC | | |
3609 | * +-----------------------+ | |
3610 | * | ^ ^ | |
3611 | * | | | | |
3612 | * l2arc_feed_thread() arc_read() | |
3613 | * | | | | |
3614 | * | l2arc read | | |
3615 | * V | | | |
3616 | * +---------------+ | | |
3617 | * | L2ARC | | | |
3618 | * +---------------+ | | |
3619 | * | ^ | | |
3620 | * l2arc_write() | | | |
3621 | * | | | | |
3622 | * V | | | |
3623 | * +-------+ +-------+ | |
3624 | * | vdev | | vdev | | |
3625 | * | cache | | cache | | |
3626 | * +-------+ +-------+ | |
3627 | * +=========+ .-----. | |
3628 | * : L2ARC : |-_____-| | |
3629 | * : devices : | Disks | | |
3630 | * +=========+ `-_____-' | |
3631 | * | |
3632 | * Read requests are satisfied from the following sources, in order: | |
3633 | * | |
3634 | * 1) ARC | |
3635 | * 2) vdev cache of L2ARC devices | |
3636 | * 3) L2ARC devices | |
3637 | * 4) vdev cache of disks | |
3638 | * 5) disks | |
3639 | * | |
3640 | * Some L2ARC device types exhibit extremely slow write performance. | |
3641 | * To accommodate for this there are some significant differences between | |
3642 | * the L2ARC and traditional cache design: | |
3643 | * | |
3644 | * 1. There is no eviction path from the ARC to the L2ARC. Evictions from | |
3645 | * the ARC behave as usual, freeing buffers and placing headers on ghost | |
3646 | * lists. The ARC does not send buffers to the L2ARC during eviction as | |
3647 | * this would add inflated write latencies for all ARC memory pressure. | |
3648 | * | |
3649 | * 2. The L2ARC attempts to cache data from the ARC before it is evicted. | |
3650 | * It does this by periodically scanning buffers from the eviction-end of | |
3651 | * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are | |
3652 | * not already there. It scans until a headroom of buffers is satisfied, | |
3653 | * which itself is a buffer for ARC eviction. The thread that does this is | |
3654 | * l2arc_feed_thread(), illustrated below; example sizes are included to | |
3655 | * provide a better sense of ratio than this diagram: | |
3656 | * | |
3657 | * head --> tail | |
3658 | * +---------------------+----------+ | |
3659 | * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC | |
3660 | * +---------------------+----------+ | o L2ARC eligible | |
3661 | * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer | |
3662 | * +---------------------+----------+ | | |
3663 | * 15.9 Gbytes ^ 32 Mbytes | | |
3664 | * headroom | | |
3665 | * l2arc_feed_thread() | |
3666 | * | | |
3667 | * l2arc write hand <--[oooo]--' | |
3668 | * | 8 Mbyte | |
3669 | * | write max | |
3670 | * V | |
3671 | * +==============================+ | |
3672 | * L2ARC dev |####|#|###|###| |####| ... | | |
3673 | * +==============================+ | |
3674 | * 32 Gbytes | |
3675 | * | |
3676 | * 3. If an ARC buffer is copied to the L2ARC but then hit instead of | |
3677 | * evicted, then the L2ARC has cached a buffer much sooner than it probably | |
3678 | * needed to, potentially wasting L2ARC device bandwidth and storage. It is | |
3679 | * safe to say that this is an uncommon case, since buffers at the end of | |
3680 | * the ARC lists have moved there due to inactivity. | |
3681 | * | |
3682 | * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, | |
3683 | * then the L2ARC simply misses copying some buffers. This serves as a | |
3684 | * pressure valve to prevent heavy read workloads from both stalling the ARC | |
3685 | * with waits and clogging the L2ARC with writes. This also helps prevent | |
3686 | * the potential for the L2ARC to churn if it attempts to cache content too | |
3687 | * quickly, such as during backups of the entire pool. | |
3688 | * | |
b128c09f BB |
3689 | * 5. After system boot and before the ARC has filled main memory, there are |
3690 | * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru | |
3691 | * lists can remain mostly static. Instead of searching from tail of these | |
3692 | * lists as pictured, the l2arc_feed_thread() will search from the list heads | |
3693 | * for eligible buffers, greatly increasing its chance of finding them. | |
3694 | * | |
3695 | * The L2ARC device write speed is also boosted during this time so that | |
3696 | * the L2ARC warms up faster. Since there have been no ARC evictions yet, | |
3697 | * there are no L2ARC reads, and no fear of degrading read performance | |
3698 | * through increased writes. | |
3699 | * | |
3700 | * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that | |
34dc7c2f BB |
3701 | * the vdev queue can aggregate them into larger and fewer writes. Each |
3702 | * device is written to in a rotor fashion, sweeping writes through | |
3703 | * available space then repeating. | |
3704 | * | |
b128c09f | 3705 | * 7. The L2ARC does not store dirty content. It never needs to flush |
34dc7c2f BB |
3706 | * write buffers back to disk based storage. |
3707 | * | |
b128c09f | 3708 | * 8. If an ARC buffer is written (and dirtied) which also exists in the |
34dc7c2f BB |
3709 | * L2ARC, the now stale L2ARC buffer is immediately dropped. |
3710 | * | |
3711 | * The performance of the L2ARC can be tweaked by a number of tunables, which | |
3712 | * may be necessary for different workloads: | |
3713 | * | |
3714 | * l2arc_write_max max write bytes per interval | |
b128c09f | 3715 | * l2arc_write_boost extra write bytes during device warmup |
34dc7c2f BB |
3716 | * l2arc_noprefetch skip caching prefetched buffers |
3717 | * l2arc_headroom number of max device writes to precache | |
3718 | * l2arc_feed_secs seconds between L2ARC writing | |
3719 | * | |
3720 | * Tunables may be removed or added as future performance improvements are | |
3721 | * integrated, and also may become zpool properties. | |
d164b209 BB |
3722 | * |
3723 | * There are three key functions that control how the L2ARC warms up: | |
3724 | * | |
3725 | * l2arc_write_eligible() check if a buffer is eligible to cache | |
3726 | * l2arc_write_size() calculate how much to write | |
3727 | * l2arc_write_interval() calculate sleep delay between writes | |
3728 | * | |
3729 | * These three functions determine what to write, how much, and how quickly | |
3730 | * to send writes. | |
34dc7c2f BB |
3731 | */ |
3732 | ||
d164b209 BB |
3733 | static boolean_t |
3734 | l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) | |
3735 | { | |
3736 | /* | |
3737 | * A buffer is *not* eligible for the L2ARC if it: | |
3738 | * 1. belongs to a different spa. | |
3739 | * 2. has no attached buffer. | |
3740 | * 3. is already cached on the L2ARC. | |
3741 | * 4. has an I/O in progress (it may be an incomplete read). | |
3742 | * 5. is flagged not eligible (zfs property). | |
3743 | */ | |
3744 | if (ab->b_spa != spa_guid || ab->b_buf == NULL || ab->b_l2hdr != NULL || | |
3745 | HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) | |
3746 | return (B_FALSE); | |
3747 | ||
3748 | return (B_TRUE); | |
3749 | } | |
3750 | ||
3751 | static uint64_t | |
3752 | l2arc_write_size(l2arc_dev_t *dev) | |
3753 | { | |
3754 | uint64_t size; | |
3755 | ||
3756 | size = dev->l2ad_write; | |
3757 | ||
3758 | if (arc_warm == B_FALSE) | |
3759 | size += dev->l2ad_boost; | |
3760 | ||
3761 | return (size); | |
3762 | ||
3763 | } | |
3764 | ||
3765 | static clock_t | |
3766 | l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) | |
3767 | { | |
3768 | clock_t interval, next; | |
3769 | ||
3770 | /* | |
3771 | * If the ARC lists are busy, increase our write rate; if the | |
3772 | * lists are stale, idle back. This is achieved by checking | |
3773 | * how much we previously wrote - if it was more than half of | |
3774 | * what we wanted, schedule the next write much sooner. | |
3775 | */ | |
3776 | if (l2arc_feed_again && wrote > (wanted / 2)) | |
3777 | interval = (hz * l2arc_feed_min_ms) / 1000; | |
3778 | else | |
3779 | interval = hz * l2arc_feed_secs; | |
3780 | ||
3781 | next = MAX(lbolt, MIN(lbolt + interval, began + interval)); | |
3782 | ||
3783 | return (next); | |
3784 | } | |
3785 | ||
34dc7c2f BB |
3786 | static void |
3787 | l2arc_hdr_stat_add(void) | |
3788 | { | |
3789 | ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); | |
3790 | ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); | |
3791 | } | |
3792 | ||
3793 | static void | |
3794 | l2arc_hdr_stat_remove(void) | |
3795 | { | |
3796 | ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); | |
3797 | ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); | |
3798 | } | |
3799 | ||
3800 | /* | |
3801 | * Cycle through L2ARC devices. This is how L2ARC load balances. | |
b128c09f | 3802 | * If a device is returned, this also returns holding the spa config lock. |
34dc7c2f BB |
3803 | */ |
3804 | static l2arc_dev_t * | |
3805 | l2arc_dev_get_next(void) | |
3806 | { | |
b128c09f | 3807 | l2arc_dev_t *first, *next = NULL; |
34dc7c2f | 3808 | |
b128c09f BB |
3809 | /* |
3810 | * Lock out the removal of spas (spa_namespace_lock), then removal | |
3811 | * of cache devices (l2arc_dev_mtx). Once a device has been selected, | |
3812 | * both locks will be dropped and a spa config lock held instead. | |
3813 | */ | |
3814 | mutex_enter(&spa_namespace_lock); | |
3815 | mutex_enter(&l2arc_dev_mtx); | |
3816 | ||
3817 | /* if there are no vdevs, there is nothing to do */ | |
3818 | if (l2arc_ndev == 0) | |
3819 | goto out; | |
3820 | ||
3821 | first = NULL; | |
3822 | next = l2arc_dev_last; | |
3823 | do { | |
3824 | /* loop around the list looking for a non-faulted vdev */ | |
3825 | if (next == NULL) { | |
34dc7c2f | 3826 | next = list_head(l2arc_dev_list); |
b128c09f BB |
3827 | } else { |
3828 | next = list_next(l2arc_dev_list, next); | |
3829 | if (next == NULL) | |
3830 | next = list_head(l2arc_dev_list); | |
3831 | } | |
3832 | ||
3833 | /* if we have come back to the start, bail out */ | |
3834 | if (first == NULL) | |
3835 | first = next; | |
3836 | else if (next == first) | |
3837 | break; | |
3838 | ||
3839 | } while (vdev_is_dead(next->l2ad_vdev)); | |
3840 | ||
3841 | /* if we were unable to find any usable vdevs, return NULL */ | |
3842 | if (vdev_is_dead(next->l2ad_vdev)) | |
3843 | next = NULL; | |
34dc7c2f BB |
3844 | |
3845 | l2arc_dev_last = next; | |
3846 | ||
b128c09f BB |
3847 | out: |
3848 | mutex_exit(&l2arc_dev_mtx); | |
3849 | ||
3850 | /* | |
3851 | * Grab the config lock to prevent the 'next' device from being | |
3852 | * removed while we are writing to it. | |
3853 | */ | |
3854 | if (next != NULL) | |
3855 | spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); | |
3856 | mutex_exit(&spa_namespace_lock); | |
3857 | ||
34dc7c2f BB |
3858 | return (next); |
3859 | } | |
3860 | ||
b128c09f BB |
3861 | /* |
3862 | * Free buffers that were tagged for destruction. | |
3863 | */ | |
3864 | static void | |
3865 | l2arc_do_free_on_write() | |
3866 | { | |
3867 | list_t *buflist; | |
3868 | l2arc_data_free_t *df, *df_prev; | |
3869 | ||
3870 | mutex_enter(&l2arc_free_on_write_mtx); | |
3871 | buflist = l2arc_free_on_write; | |
3872 | ||
3873 | for (df = list_tail(buflist); df; df = df_prev) { | |
3874 | df_prev = list_prev(buflist, df); | |
3875 | ASSERT(df->l2df_data != NULL); | |
3876 | ASSERT(df->l2df_func != NULL); | |
3877 | df->l2df_func(df->l2df_data, df->l2df_size); | |
3878 | list_remove(buflist, df); | |
3879 | kmem_free(df, sizeof (l2arc_data_free_t)); | |
3880 | } | |
3881 | ||
3882 | mutex_exit(&l2arc_free_on_write_mtx); | |
3883 | } | |
3884 | ||
34dc7c2f BB |
3885 | /* |
3886 | * A write to a cache device has completed. Update all headers to allow | |
3887 | * reads from these buffers to begin. | |
3888 | */ | |
3889 | static void | |
3890 | l2arc_write_done(zio_t *zio) | |
3891 | { | |
3892 | l2arc_write_callback_t *cb; | |
3893 | l2arc_dev_t *dev; | |
3894 | list_t *buflist; | |
34dc7c2f | 3895 | arc_buf_hdr_t *head, *ab, *ab_prev; |
b128c09f | 3896 | l2arc_buf_hdr_t *abl2; |
34dc7c2f BB |
3897 | kmutex_t *hash_lock; |
3898 | ||
3899 | cb = zio->io_private; | |
3900 | ASSERT(cb != NULL); | |
3901 | dev = cb->l2wcb_dev; | |
3902 | ASSERT(dev != NULL); | |
3903 | head = cb->l2wcb_head; | |
3904 | ASSERT(head != NULL); | |
3905 | buflist = dev->l2ad_buflist; | |
3906 | ASSERT(buflist != NULL); | |
3907 | DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, | |
3908 | l2arc_write_callback_t *, cb); | |
3909 | ||
3910 | if (zio->io_error != 0) | |
3911 | ARCSTAT_BUMP(arcstat_l2_writes_error); | |
3912 | ||
3913 | mutex_enter(&l2arc_buflist_mtx); | |
3914 | ||
3915 | /* | |
3916 | * All writes completed, or an error was hit. | |
3917 | */ | |
3918 | for (ab = list_prev(buflist, head); ab; ab = ab_prev) { | |
3919 | ab_prev = list_prev(buflist, ab); | |
3920 | ||
3921 | hash_lock = HDR_LOCK(ab); | |
3922 | if (!mutex_tryenter(hash_lock)) { | |
3923 | /* | |
3924 | * This buffer misses out. It may be in a stage | |
3925 | * of eviction. Its ARC_L2_WRITING flag will be | |
3926 | * left set, denying reads to this buffer. | |
3927 | */ | |
3928 | ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); | |
3929 | continue; | |
3930 | } | |
3931 | ||
3932 | if (zio->io_error != 0) { | |
3933 | /* | |
b128c09f | 3934 | * Error - drop L2ARC entry. |
34dc7c2f | 3935 | */ |
b128c09f BB |
3936 | list_remove(buflist, ab); |
3937 | abl2 = ab->b_l2hdr; | |
34dc7c2f | 3938 | ab->b_l2hdr = NULL; |
b128c09f BB |
3939 | kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); |
3940 | ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); | |
34dc7c2f BB |
3941 | } |
3942 | ||
3943 | /* | |
3944 | * Allow ARC to begin reads to this L2ARC entry. | |
3945 | */ | |
3946 | ab->b_flags &= ~ARC_L2_WRITING; | |
3947 | ||
3948 | mutex_exit(hash_lock); | |
3949 | } | |
3950 | ||
3951 | atomic_inc_64(&l2arc_writes_done); | |
3952 | list_remove(buflist, head); | |
3953 | kmem_cache_free(hdr_cache, head); | |
3954 | mutex_exit(&l2arc_buflist_mtx); | |
3955 | ||
b128c09f | 3956 | l2arc_do_free_on_write(); |
34dc7c2f BB |
3957 | |
3958 | kmem_free(cb, sizeof (l2arc_write_callback_t)); | |
3959 | } | |
3960 | ||
3961 | /* | |
3962 | * A read to a cache device completed. Validate buffer contents before | |
3963 | * handing over to the regular ARC routines. | |
3964 | */ | |
3965 | static void | |
3966 | l2arc_read_done(zio_t *zio) | |
3967 | { | |
3968 | l2arc_read_callback_t *cb; | |
3969 | arc_buf_hdr_t *hdr; | |
3970 | arc_buf_t *buf; | |
34dc7c2f | 3971 | kmutex_t *hash_lock; |
b128c09f BB |
3972 | int equal; |
3973 | ||
3974 | ASSERT(zio->io_vd != NULL); | |
3975 | ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); | |
3976 | ||
3977 | spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); | |
34dc7c2f BB |
3978 | |
3979 | cb = zio->io_private; | |
3980 | ASSERT(cb != NULL); | |
3981 | buf = cb->l2rcb_buf; | |
3982 | ASSERT(buf != NULL); | |
3983 | hdr = buf->b_hdr; | |
3984 | ASSERT(hdr != NULL); | |
3985 | ||
3986 | hash_lock = HDR_LOCK(hdr); | |
3987 | mutex_enter(hash_lock); | |
3988 | ||
3989 | /* | |
3990 | * Check this survived the L2ARC journey. | |
3991 | */ | |
3992 | equal = arc_cksum_equal(buf); | |
3993 | if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { | |
3994 | mutex_exit(hash_lock); | |
3995 | zio->io_private = buf; | |
b128c09f BB |
3996 | zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ |
3997 | zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ | |
34dc7c2f BB |
3998 | arc_read_done(zio); |
3999 | } else { | |
4000 | mutex_exit(hash_lock); | |
4001 | /* | |
4002 | * Buffer didn't survive caching. Increment stats and | |
4003 | * reissue to the original storage device. | |
4004 | */ | |
b128c09f | 4005 | if (zio->io_error != 0) { |
34dc7c2f | 4006 | ARCSTAT_BUMP(arcstat_l2_io_error); |
b128c09f BB |
4007 | } else { |
4008 | zio->io_error = EIO; | |
4009 | } | |
34dc7c2f BB |
4010 | if (!equal) |
4011 | ARCSTAT_BUMP(arcstat_l2_cksum_bad); | |
4012 | ||
34dc7c2f | 4013 | /* |
b128c09f BB |
4014 | * If there's no waiter, issue an async i/o to the primary |
4015 | * storage now. If there *is* a waiter, the caller must | |
4016 | * issue the i/o in a context where it's OK to block. | |
34dc7c2f | 4017 | */ |
d164b209 BB |
4018 | if (zio->io_waiter == NULL) { |
4019 | zio_t *pio = zio_unique_parent(zio); | |
4020 | ||
4021 | ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); | |
4022 | ||
4023 | zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, | |
b128c09f BB |
4024 | buf->b_data, zio->io_size, arc_read_done, buf, |
4025 | zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); | |
d164b209 | 4026 | } |
34dc7c2f BB |
4027 | } |
4028 | ||
4029 | kmem_free(cb, sizeof (l2arc_read_callback_t)); | |
4030 | } | |
4031 | ||
4032 | /* | |
4033 | * This is the list priority from which the L2ARC will search for pages to | |
4034 | * cache. This is used within loops (0..3) to cycle through lists in the | |
4035 | * desired order. This order can have a significant effect on cache | |
4036 | * performance. | |
4037 | * | |
4038 | * Currently the metadata lists are hit first, MFU then MRU, followed by | |
4039 | * the data lists. This function returns a locked list, and also returns | |
4040 | * the lock pointer. | |
4041 | */ | |
4042 | static list_t * | |
4043 | l2arc_list_locked(int list_num, kmutex_t **lock) | |
4044 | { | |
4045 | list_t *list; | |
4046 | ||
4047 | ASSERT(list_num >= 0 && list_num <= 3); | |
4048 | ||
4049 | switch (list_num) { | |
4050 | case 0: | |
4051 | list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; | |
4052 | *lock = &arc_mfu->arcs_mtx; | |
4053 | break; | |
4054 | case 1: | |
4055 | list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; | |
4056 | *lock = &arc_mru->arcs_mtx; | |
4057 | break; | |
4058 | case 2: | |
4059 | list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; | |
4060 | *lock = &arc_mfu->arcs_mtx; | |
4061 | break; | |
4062 | case 3: | |
4063 | list = &arc_mru->arcs_list[ARC_BUFC_DATA]; | |
4064 | *lock = &arc_mru->arcs_mtx; | |
4065 | break; | |
4066 | } | |
4067 | ||
4068 | ASSERT(!(MUTEX_HELD(*lock))); | |
4069 | mutex_enter(*lock); | |
4070 | return (list); | |
4071 | } | |
4072 | ||
4073 | /* | |
4074 | * Evict buffers from the device write hand to the distance specified in | |
4075 | * bytes. This distance may span populated buffers, it may span nothing. | |
4076 | * This is clearing a region on the L2ARC device ready for writing. | |
4077 | * If the 'all' boolean is set, every buffer is evicted. | |
4078 | */ | |
4079 | static void | |
4080 | l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) | |
4081 | { | |
4082 | list_t *buflist; | |
4083 | l2arc_buf_hdr_t *abl2; | |
4084 | arc_buf_hdr_t *ab, *ab_prev; | |
4085 | kmutex_t *hash_lock; | |
4086 | uint64_t taddr; | |
4087 | ||
34dc7c2f BB |
4088 | buflist = dev->l2ad_buflist; |
4089 | ||
4090 | if (buflist == NULL) | |
4091 | return; | |
4092 | ||
4093 | if (!all && dev->l2ad_first) { | |
4094 | /* | |
4095 | * This is the first sweep through the device. There is | |
4096 | * nothing to evict. | |
4097 | */ | |
4098 | return; | |
4099 | } | |
4100 | ||
b128c09f | 4101 | if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { |
34dc7c2f BB |
4102 | /* |
4103 | * When nearing the end of the device, evict to the end | |
4104 | * before the device write hand jumps to the start. | |
4105 | */ | |
4106 | taddr = dev->l2ad_end; | |
4107 | } else { | |
4108 | taddr = dev->l2ad_hand + distance; | |
4109 | } | |
4110 | DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, | |
4111 | uint64_t, taddr, boolean_t, all); | |
4112 | ||
4113 | top: | |
4114 | mutex_enter(&l2arc_buflist_mtx); | |
4115 | for (ab = list_tail(buflist); ab; ab = ab_prev) { | |
4116 | ab_prev = list_prev(buflist, ab); | |
4117 | ||
4118 | hash_lock = HDR_LOCK(ab); | |
4119 | if (!mutex_tryenter(hash_lock)) { | |
4120 | /* | |
4121 | * Missed the hash lock. Retry. | |
4122 | */ | |
4123 | ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); | |
4124 | mutex_exit(&l2arc_buflist_mtx); | |
4125 | mutex_enter(hash_lock); | |
4126 | mutex_exit(hash_lock); | |
4127 | goto top; | |
4128 | } | |
4129 | ||
4130 | if (HDR_L2_WRITE_HEAD(ab)) { | |
4131 | /* | |
4132 | * We hit a write head node. Leave it for | |
4133 | * l2arc_write_done(). | |
4134 | */ | |
4135 | list_remove(buflist, ab); | |
4136 | mutex_exit(hash_lock); | |
4137 | continue; | |
4138 | } | |
4139 | ||
4140 | if (!all && ab->b_l2hdr != NULL && | |
4141 | (ab->b_l2hdr->b_daddr > taddr || | |
4142 | ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { | |
4143 | /* | |
4144 | * We've evicted to the target address, | |
4145 | * or the end of the device. | |
4146 | */ | |
4147 | mutex_exit(hash_lock); | |
4148 | break; | |
4149 | } | |
4150 | ||
4151 | if (HDR_FREE_IN_PROGRESS(ab)) { | |
4152 | /* | |
4153 | * Already on the path to destruction. | |
4154 | */ | |
4155 | mutex_exit(hash_lock); | |
4156 | continue; | |
4157 | } | |
4158 | ||
4159 | if (ab->b_state == arc_l2c_only) { | |
4160 | ASSERT(!HDR_L2_READING(ab)); | |
4161 | /* | |
4162 | * This doesn't exist in the ARC. Destroy. | |
4163 | * arc_hdr_destroy() will call list_remove() | |
4164 | * and decrement arcstat_l2_size. | |
4165 | */ | |
4166 | arc_change_state(arc_anon, ab, hash_lock); | |
4167 | arc_hdr_destroy(ab); | |
4168 | } else { | |
b128c09f BB |
4169 | /* |
4170 | * Invalidate issued or about to be issued | |
4171 | * reads, since we may be about to write | |
4172 | * over this location. | |
4173 | */ | |
4174 | if (HDR_L2_READING(ab)) { | |
4175 | ARCSTAT_BUMP(arcstat_l2_evict_reading); | |
4176 | ab->b_flags |= ARC_L2_EVICTED; | |
4177 | } | |
4178 | ||
34dc7c2f BB |
4179 | /* |
4180 | * Tell ARC this no longer exists in L2ARC. | |
4181 | */ | |
4182 | if (ab->b_l2hdr != NULL) { | |
4183 | abl2 = ab->b_l2hdr; | |
4184 | ab->b_l2hdr = NULL; | |
4185 | kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); | |
4186 | ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); | |
4187 | } | |
4188 | list_remove(buflist, ab); | |
4189 | ||
4190 | /* | |
4191 | * This may have been leftover after a | |
4192 | * failed write. | |
4193 | */ | |
4194 | ab->b_flags &= ~ARC_L2_WRITING; | |
34dc7c2f BB |
4195 | } |
4196 | mutex_exit(hash_lock); | |
4197 | } | |
4198 | mutex_exit(&l2arc_buflist_mtx); | |
4199 | ||
4200 | spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); | |
4201 | dev->l2ad_evict = taddr; | |
4202 | } | |
4203 | ||
4204 | /* | |
4205 | * Find and write ARC buffers to the L2ARC device. | |
4206 | * | |
4207 | * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid | |
4208 | * for reading until they have completed writing. | |
4209 | */ | |
d164b209 | 4210 | static uint64_t |
b128c09f | 4211 | l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) |
34dc7c2f BB |
4212 | { |
4213 | arc_buf_hdr_t *ab, *ab_prev, *head; | |
4214 | l2arc_buf_hdr_t *hdrl2; | |
4215 | list_t *list; | |
b128c09f | 4216 | uint64_t passed_sz, write_sz, buf_sz, headroom; |
34dc7c2f BB |
4217 | void *buf_data; |
4218 | kmutex_t *hash_lock, *list_lock; | |
4219 | boolean_t have_lock, full; | |
4220 | l2arc_write_callback_t *cb; | |
4221 | zio_t *pio, *wzio; | |
d164b209 | 4222 | uint64_t guid = spa_guid(spa); |
34dc7c2f | 4223 | |
34dc7c2f BB |
4224 | ASSERT(dev->l2ad_vdev != NULL); |
4225 | ||
4226 | pio = NULL; | |
4227 | write_sz = 0; | |
4228 | full = B_FALSE; | |
4229 | head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
4230 | head->b_flags |= ARC_L2_WRITE_HEAD; | |
4231 | ||
4232 | /* | |
4233 | * Copy buffers for L2ARC writing. | |
4234 | */ | |
4235 | mutex_enter(&l2arc_buflist_mtx); | |
4236 | for (int try = 0; try <= 3; try++) { | |
4237 | list = l2arc_list_locked(try, &list_lock); | |
4238 | passed_sz = 0; | |
4239 | ||
b128c09f BB |
4240 | /* |
4241 | * L2ARC fast warmup. | |
4242 | * | |
4243 | * Until the ARC is warm and starts to evict, read from the | |
4244 | * head of the ARC lists rather than the tail. | |
4245 | */ | |
4246 | headroom = target_sz * l2arc_headroom; | |
4247 | if (arc_warm == B_FALSE) | |
4248 | ab = list_head(list); | |
4249 | else | |
4250 | ab = list_tail(list); | |
4251 | ||
4252 | for (; ab; ab = ab_prev) { | |
4253 | if (arc_warm == B_FALSE) | |
4254 | ab_prev = list_next(list, ab); | |
4255 | else | |
4256 | ab_prev = list_prev(list, ab); | |
34dc7c2f BB |
4257 | |
4258 | hash_lock = HDR_LOCK(ab); | |
4259 | have_lock = MUTEX_HELD(hash_lock); | |
4260 | if (!have_lock && !mutex_tryenter(hash_lock)) { | |
4261 | /* | |
4262 | * Skip this buffer rather than waiting. | |
4263 | */ | |
4264 | continue; | |
4265 | } | |
4266 | ||
4267 | passed_sz += ab->b_size; | |
4268 | if (passed_sz > headroom) { | |
4269 | /* | |
4270 | * Searched too far. | |
4271 | */ | |
4272 | mutex_exit(hash_lock); | |
4273 | break; | |
4274 | } | |
4275 | ||
d164b209 | 4276 | if (!l2arc_write_eligible(guid, ab)) { |
34dc7c2f BB |
4277 | mutex_exit(hash_lock); |
4278 | continue; | |
4279 | } | |
4280 | ||
4281 | if ((write_sz + ab->b_size) > target_sz) { | |
4282 | full = B_TRUE; | |
4283 | mutex_exit(hash_lock); | |
4284 | break; | |
4285 | } | |
4286 | ||
34dc7c2f BB |
4287 | if (pio == NULL) { |
4288 | /* | |
4289 | * Insert a dummy header on the buflist so | |
4290 | * l2arc_write_done() can find where the | |
4291 | * write buffers begin without searching. | |
4292 | */ | |
4293 | list_insert_head(dev->l2ad_buflist, head); | |
4294 | ||
4295 | cb = kmem_alloc( | |
4296 | sizeof (l2arc_write_callback_t), KM_SLEEP); | |
4297 | cb->l2wcb_dev = dev; | |
4298 | cb->l2wcb_head = head; | |
4299 | pio = zio_root(spa, l2arc_write_done, cb, | |
4300 | ZIO_FLAG_CANFAIL); | |
4301 | } | |
4302 | ||
4303 | /* | |
4304 | * Create and add a new L2ARC header. | |
4305 | */ | |
4306 | hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); | |
4307 | hdrl2->b_dev = dev; | |
4308 | hdrl2->b_daddr = dev->l2ad_hand; | |
4309 | ||
4310 | ab->b_flags |= ARC_L2_WRITING; | |
4311 | ab->b_l2hdr = hdrl2; | |
4312 | list_insert_head(dev->l2ad_buflist, ab); | |
4313 | buf_data = ab->b_buf->b_data; | |
4314 | buf_sz = ab->b_size; | |
4315 | ||
4316 | /* | |
4317 | * Compute and store the buffer cksum before | |
4318 | * writing. On debug the cksum is verified first. | |
4319 | */ | |
4320 | arc_cksum_verify(ab->b_buf); | |
4321 | arc_cksum_compute(ab->b_buf, B_TRUE); | |
4322 | ||
4323 | mutex_exit(hash_lock); | |
4324 | ||
4325 | wzio = zio_write_phys(pio, dev->l2ad_vdev, | |
4326 | dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, | |
4327 | NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, | |
4328 | ZIO_FLAG_CANFAIL, B_FALSE); | |
4329 | ||
4330 | DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, | |
4331 | zio_t *, wzio); | |
4332 | (void) zio_nowait(wzio); | |
4333 | ||
b128c09f BB |
4334 | /* |
4335 | * Keep the clock hand suitably device-aligned. | |
4336 | */ | |
4337 | buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); | |
4338 | ||
34dc7c2f BB |
4339 | write_sz += buf_sz; |
4340 | dev->l2ad_hand += buf_sz; | |
4341 | } | |
4342 | ||
4343 | mutex_exit(list_lock); | |
4344 | ||
4345 | if (full == B_TRUE) | |
4346 | break; | |
4347 | } | |
4348 | mutex_exit(&l2arc_buflist_mtx); | |
4349 | ||
4350 | if (pio == NULL) { | |
4351 | ASSERT3U(write_sz, ==, 0); | |
4352 | kmem_cache_free(hdr_cache, head); | |
d164b209 | 4353 | return (0); |
34dc7c2f BB |
4354 | } |
4355 | ||
4356 | ASSERT3U(write_sz, <=, target_sz); | |
4357 | ARCSTAT_BUMP(arcstat_l2_writes_sent); | |
d164b209 | 4358 | ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); |
34dc7c2f BB |
4359 | ARCSTAT_INCR(arcstat_l2_size, write_sz); |
4360 | spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); | |
4361 | ||
4362 | /* | |
4363 | * Bump device hand to the device start if it is approaching the end. | |
4364 | * l2arc_evict() will already have evicted ahead for this case. | |
4365 | */ | |
b128c09f | 4366 | if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { |
34dc7c2f BB |
4367 | spa_l2cache_space_update(dev->l2ad_vdev, 0, |
4368 | dev->l2ad_end - dev->l2ad_hand); | |
4369 | dev->l2ad_hand = dev->l2ad_start; | |
4370 | dev->l2ad_evict = dev->l2ad_start; | |
4371 | dev->l2ad_first = B_FALSE; | |
4372 | } | |
4373 | ||
d164b209 | 4374 | dev->l2ad_writing = B_TRUE; |
34dc7c2f | 4375 | (void) zio_wait(pio); |
d164b209 BB |
4376 | dev->l2ad_writing = B_FALSE; |
4377 | ||
4378 | return (write_sz); | |
34dc7c2f BB |
4379 | } |
4380 | ||
4381 | /* | |
4382 | * This thread feeds the L2ARC at regular intervals. This is the beating | |
4383 | * heart of the L2ARC. | |
4384 | */ | |
4385 | static void | |
4386 | l2arc_feed_thread(void) | |
4387 | { | |
4388 | callb_cpr_t cpr; | |
4389 | l2arc_dev_t *dev; | |
4390 | spa_t *spa; | |
d164b209 BB |
4391 | uint64_t size, wrote; |
4392 | clock_t begin, next = lbolt; | |
34dc7c2f BB |
4393 | |
4394 | CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); | |
4395 | ||
4396 | mutex_enter(&l2arc_feed_thr_lock); | |
4397 | ||
4398 | while (l2arc_thread_exit == 0) { | |
34dc7c2f | 4399 | CALLB_CPR_SAFE_BEGIN(&cpr); |
34dc7c2f | 4400 | (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, |
d164b209 | 4401 | next); |
34dc7c2f | 4402 | CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); |
d164b209 | 4403 | next = lbolt + hz; |
34dc7c2f BB |
4404 | |
4405 | /* | |
b128c09f | 4406 | * Quick check for L2ARC devices. |
34dc7c2f BB |
4407 | */ |
4408 | mutex_enter(&l2arc_dev_mtx); | |
4409 | if (l2arc_ndev == 0) { | |
4410 | mutex_exit(&l2arc_dev_mtx); | |
4411 | continue; | |
4412 | } | |
b128c09f | 4413 | mutex_exit(&l2arc_dev_mtx); |
d164b209 | 4414 | begin = lbolt; |
34dc7c2f BB |
4415 | |
4416 | /* | |
b128c09f BB |
4417 | * This selects the next l2arc device to write to, and in |
4418 | * doing so the next spa to feed from: dev->l2ad_spa. This | |
4419 | * will return NULL if there are now no l2arc devices or if | |
4420 | * they are all faulted. | |
4421 | * | |
4422 | * If a device is returned, its spa's config lock is also | |
4423 | * held to prevent device removal. l2arc_dev_get_next() | |
4424 | * will grab and release l2arc_dev_mtx. | |
34dc7c2f | 4425 | */ |
b128c09f | 4426 | if ((dev = l2arc_dev_get_next()) == NULL) |
34dc7c2f | 4427 | continue; |
b128c09f BB |
4428 | |
4429 | spa = dev->l2ad_spa; | |
4430 | ASSERT(spa != NULL); | |
34dc7c2f BB |
4431 | |
4432 | /* | |
b128c09f | 4433 | * Avoid contributing to memory pressure. |
34dc7c2f | 4434 | */ |
b128c09f BB |
4435 | if (arc_reclaim_needed()) { |
4436 | ARCSTAT_BUMP(arcstat_l2_abort_lowmem); | |
4437 | spa_config_exit(spa, SCL_L2ARC, dev); | |
34dc7c2f BB |
4438 | continue; |
4439 | } | |
b128c09f | 4440 | |
34dc7c2f BB |
4441 | ARCSTAT_BUMP(arcstat_l2_feeds); |
4442 | ||
d164b209 | 4443 | size = l2arc_write_size(dev); |
b128c09f | 4444 | |
34dc7c2f BB |
4445 | /* |
4446 | * Evict L2ARC buffers that will be overwritten. | |
4447 | */ | |
b128c09f | 4448 | l2arc_evict(dev, size, B_FALSE); |
34dc7c2f BB |
4449 | |
4450 | /* | |
4451 | * Write ARC buffers. | |
4452 | */ | |
d164b209 BB |
4453 | wrote = l2arc_write_buffers(spa, dev, size); |
4454 | ||
4455 | /* | |
4456 | * Calculate interval between writes. | |
4457 | */ | |
4458 | next = l2arc_write_interval(begin, size, wrote); | |
b128c09f | 4459 | spa_config_exit(spa, SCL_L2ARC, dev); |
34dc7c2f BB |
4460 | } |
4461 | ||
4462 | l2arc_thread_exit = 0; | |
4463 | cv_broadcast(&l2arc_feed_thr_cv); | |
4464 | CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ | |
4465 | thread_exit(); | |
4466 | } | |
4467 | ||
b128c09f BB |
4468 | boolean_t |
4469 | l2arc_vdev_present(vdev_t *vd) | |
4470 | { | |
4471 | l2arc_dev_t *dev; | |
4472 | ||
4473 | mutex_enter(&l2arc_dev_mtx); | |
4474 | for (dev = list_head(l2arc_dev_list); dev != NULL; | |
4475 | dev = list_next(l2arc_dev_list, dev)) { | |
4476 | if (dev->l2ad_vdev == vd) | |
4477 | break; | |
4478 | } | |
4479 | mutex_exit(&l2arc_dev_mtx); | |
4480 | ||
4481 | return (dev != NULL); | |
4482 | } | |
4483 | ||
34dc7c2f BB |
4484 | /* |
4485 | * Add a vdev for use by the L2ARC. By this point the spa has already | |
4486 | * validated the vdev and opened it. | |
4487 | */ | |
4488 | void | |
4489 | l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) | |
4490 | { | |
4491 | l2arc_dev_t *adddev; | |
4492 | ||
b128c09f BB |
4493 | ASSERT(!l2arc_vdev_present(vd)); |
4494 | ||
34dc7c2f BB |
4495 | /* |
4496 | * Create a new l2arc device entry. | |
4497 | */ | |
4498 | adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); | |
4499 | adddev->l2ad_spa = spa; | |
4500 | adddev->l2ad_vdev = vd; | |
4501 | adddev->l2ad_write = l2arc_write_max; | |
b128c09f | 4502 | adddev->l2ad_boost = l2arc_write_boost; |
34dc7c2f BB |
4503 | adddev->l2ad_start = start; |
4504 | adddev->l2ad_end = end; | |
4505 | adddev->l2ad_hand = adddev->l2ad_start; | |
4506 | adddev->l2ad_evict = adddev->l2ad_start; | |
4507 | adddev->l2ad_first = B_TRUE; | |
d164b209 | 4508 | adddev->l2ad_writing = B_FALSE; |
34dc7c2f BB |
4509 | ASSERT3U(adddev->l2ad_write, >, 0); |
4510 | ||
4511 | /* | |
4512 | * This is a list of all ARC buffers that are still valid on the | |
4513 | * device. | |
4514 | */ | |
4515 | adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); | |
4516 | list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), | |
4517 | offsetof(arc_buf_hdr_t, b_l2node)); | |
4518 | ||
4519 | spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); | |
4520 | ||
4521 | /* | |
4522 | * Add device to global list | |
4523 | */ | |
4524 | mutex_enter(&l2arc_dev_mtx); | |
4525 | list_insert_head(l2arc_dev_list, adddev); | |
4526 | atomic_inc_64(&l2arc_ndev); | |
4527 | mutex_exit(&l2arc_dev_mtx); | |
4528 | } | |
4529 | ||
4530 | /* | |
4531 | * Remove a vdev from the L2ARC. | |
4532 | */ | |
4533 | void | |
4534 | l2arc_remove_vdev(vdev_t *vd) | |
4535 | { | |
4536 | l2arc_dev_t *dev, *nextdev, *remdev = NULL; | |
4537 | ||
34dc7c2f BB |
4538 | /* |
4539 | * Find the device by vdev | |
4540 | */ | |
4541 | mutex_enter(&l2arc_dev_mtx); | |
4542 | for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { | |
4543 | nextdev = list_next(l2arc_dev_list, dev); | |
4544 | if (vd == dev->l2ad_vdev) { | |
4545 | remdev = dev; | |
4546 | break; | |
4547 | } | |
4548 | } | |
4549 | ASSERT(remdev != NULL); | |
4550 | ||
4551 | /* | |
4552 | * Remove device from global list | |
4553 | */ | |
4554 | list_remove(l2arc_dev_list, remdev); | |
4555 | l2arc_dev_last = NULL; /* may have been invalidated */ | |
b128c09f BB |
4556 | atomic_dec_64(&l2arc_ndev); |
4557 | mutex_exit(&l2arc_dev_mtx); | |
34dc7c2f BB |
4558 | |
4559 | /* | |
4560 | * Clear all buflists and ARC references. L2ARC device flush. | |
4561 | */ | |
4562 | l2arc_evict(remdev, 0, B_TRUE); | |
4563 | list_destroy(remdev->l2ad_buflist); | |
4564 | kmem_free(remdev->l2ad_buflist, sizeof (list_t)); | |
4565 | kmem_free(remdev, sizeof (l2arc_dev_t)); | |
34dc7c2f BB |
4566 | } |
4567 | ||
4568 | void | |
b128c09f | 4569 | l2arc_init(void) |
34dc7c2f BB |
4570 | { |
4571 | l2arc_thread_exit = 0; | |
4572 | l2arc_ndev = 0; | |
4573 | l2arc_writes_sent = 0; | |
4574 | l2arc_writes_done = 0; | |
4575 | ||
4576 | mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
4577 | cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); | |
4578 | mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4579 | mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4580 | mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4581 | ||
4582 | l2arc_dev_list = &L2ARC_dev_list; | |
4583 | l2arc_free_on_write = &L2ARC_free_on_write; | |
4584 | list_create(l2arc_dev_list, sizeof (l2arc_dev_t), | |
4585 | offsetof(l2arc_dev_t, l2ad_node)); | |
4586 | list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), | |
4587 | offsetof(l2arc_data_free_t, l2df_list_node)); | |
34dc7c2f BB |
4588 | } |
4589 | ||
4590 | void | |
b128c09f | 4591 | l2arc_fini(void) |
34dc7c2f | 4592 | { |
b128c09f BB |
4593 | /* |
4594 | * This is called from dmu_fini(), which is called from spa_fini(); | |
4595 | * Because of this, we can assume that all l2arc devices have | |
4596 | * already been removed when the pools themselves were removed. | |
4597 | */ | |
4598 | ||
4599 | l2arc_do_free_on_write(); | |
34dc7c2f BB |
4600 | |
4601 | mutex_destroy(&l2arc_feed_thr_lock); | |
4602 | cv_destroy(&l2arc_feed_thr_cv); | |
4603 | mutex_destroy(&l2arc_dev_mtx); | |
4604 | mutex_destroy(&l2arc_buflist_mtx); | |
4605 | mutex_destroy(&l2arc_free_on_write_mtx); | |
4606 | ||
4607 | list_destroy(l2arc_dev_list); | |
4608 | list_destroy(l2arc_free_on_write); | |
4609 | } | |
b128c09f BB |
4610 | |
4611 | void | |
4612 | l2arc_start(void) | |
4613 | { | |
fb5f0bc8 | 4614 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
4615 | return; |
4616 | ||
4617 | (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, | |
4618 | TS_RUN, minclsyspri); | |
4619 | } | |
4620 | ||
4621 | void | |
4622 | l2arc_stop(void) | |
4623 | { | |
fb5f0bc8 | 4624 | if (!(spa_mode_global & FWRITE)) |
b128c09f BB |
4625 | return; |
4626 | ||
4627 | mutex_enter(&l2arc_feed_thr_lock); | |
4628 | cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ | |
4629 | l2arc_thread_exit = 1; | |
4630 | while (l2arc_thread_exit != 0) | |
4631 | cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); | |
4632 | mutex_exit(&l2arc_feed_thr_lock); | |
4633 | } |