/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"@(#)arc.c	1.44	08/03/20 SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic:  we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
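
/*
 * An illustrative sketch only, not part of the implementation; here
 * evictable() is a hypothetical stand-in for the real checks (refcount
 * of zero, hash lock available, prefetch lifespan expired).  To make
 * room for a miss of `bytes', eviction conceptually walks the tail of
 * an ARC list and accumulates evictable blocks until the freed space
 * covers the request:
 *
 *	uint64_t evicted = 0;
 *	for (ab = list_tail(list); ab != NULL && evicted < bytes;
 *	    ab = ab_prev) {
 *		ab_prev = list_prev(list, ab);
 *		if (evictable(ab))
 *			evicted += ab->b_size;
 *	}
 *
 * The real logic is in arc_evict() below, which also handles locking,
 * buffer recycling, and ghost state accounting.
 */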

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
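
/*
 * A minimal sketch of the list-lock/hash-lock ordering rule above;
 * this is the pattern the eviction paths below follow, not a new
 * interface:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for (ab = list_tail(list); ab; ab = list_prev(list, ab)) {
 *		hash_lock = HDR_LOCK(ab);
 *		if (mutex_tryenter(hash_lock)) {
 *			... operate on the header ...
 *			mutex_exit(hash_lock);
 *		}
 *		... otherwise skip the buffer; never block here ...
 *	}
 *	mutex_exit(&state->arcs_mtx);
 *
 * See arc_evict() and arc_evict_ghost() below for the real thing.
 */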

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern uint64_t zfs_write_limit_inflated;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
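
/*
 * A rough sketch of the lifecycle implied above (the transitions are
 * driven by arc_access() and the eviction routines):
 *
 *	arc_anon --(write assigns a DVA)--> arc_mru
 *	arc_mru --(evicted)--> arc_mru_ghost --(hit again)--> arc_mfu
 *	arc_mfu --(evicted)--> arc_mfu_ghost --(hit again)--> arc_mfu
 *	any state --(only the L2ARC copy remains)--> arc_l2c_only
 */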

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}
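
/*
 * ARCSTAT_MAX() publishes a new maximum without taking a lock: it
 * retries atomic_cas_64() until either the stored value is already
 * >= val or the swap succeeds.  Typical use, as in buf_hash_insert()
 * below:
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 */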

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
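
/*
 * For example, the call in arc_buf_add_ref() below:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits, or
 * arcstat_prefetch_metadata_hits, depending on the two conditions.
 */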

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_DONT_L2CACHE	(1 << 16)	/* originated by prefetch */
#define	ARC_L2_READING		(1 << 17)	/* L2ARC read in progress */
#define	ARC_L2_WRITING		(1 << 18)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 19)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 20)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_DONT_L2CACHE(hdr)	((hdr)->b_flags & ARC_DONT_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_L2_READING)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_DELAY	180		/* starting grace */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
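
/*
 * Illustrative use of the allocation interface above (hypothetical
 * caller; the tag is an opaque cookie used only to balance reference
 * counts):
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	... fill buf->b_data ...
 *	(void) arc_buf_remove_ref(buf, tag);
 */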

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			ASSERT(hdr->b_l2hdr != NULL);

			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}
1403 | ||
1404 | /* | |
1405 | * Evict buffers from list until we've removed the specified number of | |
1406 | * bytes. Move the removed buffers to the appropriate evict state. | |
1407 | * If the recycle flag is set, then attempt to "recycle" a buffer: | |
1408 | * - look for a buffer to evict that is `bytes' long. | |
1409 | * - return the data block from this buffer rather than freeing it. | |
1410 | * This flag is used by callers that are trying to make space for a | |
1411 | * new buffer in a full arc cache. | |
1412 | * | |
1413 | * This function makes a "best effort". It skips over any buffers | |
1414 | * it can't get a hash_lock on, and so may not catch all candidates. | |
1415 | * It may also return without evicting as much space as requested. | |
1416 | */ | |
1417 | static void * | |
1418 | arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, | |
1419 | arc_buf_contents_t type) | |
1420 | { | |
1421 | arc_state_t *evicted_state; | |
1422 | uint64_t bytes_evicted = 0, skipped = 0, missed = 0; | |
1423 | arc_buf_hdr_t *ab, *ab_prev = NULL; | |
1424 | list_t *list = &state->arcs_list[type]; | |
1425 | kmutex_t *hash_lock; | |
1426 | boolean_t have_lock; | |
1427 | void *stolen = NULL; | |
1428 | ||
1429 | ASSERT(state == arc_mru || state == arc_mfu); | |
1430 | ||
1431 | evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
1432 | ||
1433 | mutex_enter(&state->arcs_mtx); | |
1434 | mutex_enter(&evicted_state->arcs_mtx); | |
1435 | ||
1436 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1437 | ab_prev = list_prev(list, ab); | |
1438 | /* prefetch buffers have a minimum lifespan */ | |
1439 | if (HDR_IO_IN_PROGRESS(ab) || | |
1440 | (spa && ab->b_spa != spa) || | |
1441 | (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && | |
1442 | lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { | |
1443 | skipped++; | |
1444 | continue; | |
1445 | } | |
1446 | /* "lookahead" for better eviction candidate */ | |
1447 | if (recycle && ab->b_size != bytes && | |
1448 | ab_prev && ab_prev->b_size == bytes) | |
1449 | continue; | |
1450 | hash_lock = HDR_LOCK(ab); | |
1451 | have_lock = MUTEX_HELD(hash_lock); | |
1452 | if (have_lock || mutex_tryenter(hash_lock)) { | |
1453 | ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); | |
1454 | ASSERT(ab->b_datacnt > 0); | |
1455 | while (ab->b_buf) { | |
1456 | arc_buf_t *buf = ab->b_buf; | |
1457 | if (buf->b_data) { | |
1458 | bytes_evicted += ab->b_size; | |
1459 | if (recycle && ab->b_type == type && | |
1460 | ab->b_size == bytes && | |
1461 | !HDR_L2_WRITING(ab)) { | |
1462 | stolen = buf->b_data; | |
1463 | recycle = FALSE; | |
1464 | } | |
1465 | } | |
1466 | if (buf->b_efunc) { | |
1467 | mutex_enter(&arc_eviction_mtx); | |
1468 | arc_buf_destroy(buf, | |
1469 | buf->b_data == stolen, FALSE); | |
1470 | ab->b_buf = buf->b_next; | |
1471 | buf->b_hdr = &arc_eviction_hdr; | |
1472 | buf->b_next = arc_eviction_list; | |
1473 | arc_eviction_list = buf; | |
1474 | mutex_exit(&arc_eviction_mtx); | |
1475 | } else { | |
1476 | arc_buf_destroy(buf, | |
1477 | buf->b_data == stolen, TRUE); | |
1478 | } | |
1479 | } | |
1480 | ASSERT(ab->b_datacnt == 0); | |
1481 | arc_change_state(evicted_state, ab, hash_lock); | |
1482 | ASSERT(HDR_IN_HASH_TABLE(ab)); | |
1483 | ab->b_flags |= ARC_IN_HASH_TABLE; | |
1484 | ab->b_flags &= ~ARC_BUF_AVAILABLE; | |
1485 | DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); | |
1486 | if (!have_lock) | |
1487 | mutex_exit(hash_lock); | |
1488 | if (bytes >= 0 && bytes_evicted >= bytes) | |
1489 | break; | |
1490 | } else { | |
1491 | missed += 1; | |
1492 | } | |
1493 | } | |
1494 | ||
1495 | mutex_exit(&evicted_state->arcs_mtx); | |
1496 | mutex_exit(&state->arcs_mtx); | |
1497 | ||
1498 | if (bytes_evicted < bytes) | |
1499 | dprintf("only evicted %lld bytes from %x", | |
1500 | (longlong_t)bytes_evicted, state); | |
1501 | ||
1502 | if (skipped) | |
1503 | ARCSTAT_INCR(arcstat_evict_skip, skipped); | |
1504 | ||
1505 | if (missed) | |
1506 | ARCSTAT_INCR(arcstat_mutex_miss, missed); | |
1507 | ||
1508 | /* | |
1509 | * We have just evicted some data into the ghost state; make | |
1510 | * sure we also adjust the ghost state size if necessary. | |
1511 | */ | |
1512 | if (arc_no_grow && | |
1513 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { | |
1514 | int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + | |
1515 | arc_mru_ghost->arcs_size - arc_c; | |
1516 | ||
1517 | if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { | |
1518 | int64_t todelete = | |
1519 | MIN(arc_mru_ghost->arcs_lsize[type], mru_over); | |
1520 | arc_evict_ghost(arc_mru_ghost, NULL, todelete); | |
1521 | } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { | |
1522 | int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], | |
1523 | arc_mru_ghost->arcs_size + | |
1524 | arc_mfu_ghost->arcs_size - arc_c); | |
1525 | arc_evict_ghost(arc_mfu_ghost, NULL, todelete); | |
1526 | } | |
1527 | } | |
1528 | ||
1529 | return (stolen); | |
1530 | } | |
1531 | ||
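 | /* | |
 | * Recycle usage sketch (mirroring arc_get_data_buf() below): when the | |
 | * cache is full and a new 'size'-byte block is needed, a caller may | |
 | * steal the data block of an evicted buffer of the same size instead | |
 | * of freeing it and allocating afresh: | |
 | * | |
 | *	if ((data = arc_evict(state, NULL, size, TRUE, type)) == NULL) | |
 | *		data = zio_buf_alloc(size); | |
 | */ | |
 | ||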
1532 | /* | |
1533 | * Remove buffers from list until we've removed the specified number of | |
1534 | * bytes. Destroy the buffers that are removed. | |
1535 | */ | |
1536 | static void | |
1537 | arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) | |
1538 | { | |
1539 | arc_buf_hdr_t *ab, *ab_prev; | |
1540 | list_t *list = &state->arcs_list[ARC_BUFC_DATA]; | |
1541 | kmutex_t *hash_lock; | |
1542 | uint64_t bytes_deleted = 0; | |
1543 | uint64_t bufs_skipped = 0; | |
1544 | ||
1545 | ASSERT(GHOST_STATE(state)); | |
1546 | top: | |
1547 | mutex_enter(&state->arcs_mtx); | |
1548 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1549 | ab_prev = list_prev(list, ab); | |
1550 | if (spa && ab->b_spa != spa) | |
1551 | continue; | |
1552 | hash_lock = HDR_LOCK(ab); | |
1553 | if (mutex_tryenter(hash_lock)) { | |
1554 | ASSERT(!HDR_IO_IN_PROGRESS(ab)); | |
1555 | ASSERT(ab->b_buf == NULL); | |
1556 | ARCSTAT_BUMP(arcstat_deleted); | |
1557 | bytes_deleted += ab->b_size; | |
1558 | ||
1559 | if (ab->b_l2hdr != NULL) { | |
1560 | /* | |
1561 | * This buffer is cached on the 2nd Level ARC; | |
1562 | * don't destroy the header. | |
1563 | */ | |
1564 | arc_change_state(arc_l2c_only, ab, hash_lock); | |
1565 | mutex_exit(hash_lock); | |
1566 | } else { | |
1567 | arc_change_state(arc_anon, ab, hash_lock); | |
1568 | mutex_exit(hash_lock); | |
1569 | arc_hdr_destroy(ab); | |
1570 | } | |
1571 | ||
1572 | DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); | |
1573 | if (bytes >= 0 && bytes_deleted >= bytes) | |
1574 | break; | |
1575 | } else { | |
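 | /* | |
 | * When evicting everything (bytes < 0) we cannot | |
 | * simply skip a busy buffer; drop the list lock, | |
 | * wait for the hash lock holder to finish, and | |
 | * rescan from the tail. | |
 | */ | |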
1576 | if (bytes < 0) { | |
1577 | mutex_exit(&state->arcs_mtx); | |
1578 | mutex_enter(hash_lock); | |
1579 | mutex_exit(hash_lock); | |
1580 | goto top; | |
1581 | } | |
1582 | bufs_skipped += 1; | |
1583 | } | |
1584 | } | |
1585 | mutex_exit(&state->arcs_mtx); | |
1586 | ||
1587 | if (list == &state->arcs_list[ARC_BUFC_DATA] && | |
1588 | (bytes < 0 || bytes_deleted < bytes)) { | |
1589 | list = &state->arcs_list[ARC_BUFC_METADATA]; | |
1590 | goto top; | |
1591 | } | |
1592 | ||
1593 | if (bufs_skipped) { | |
1594 | ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); | |
1595 | ASSERT(bytes >= 0); | |
1596 | } | |
1597 | ||
1598 | if (bytes_deleted < bytes) | |
1599 | dprintf("only deleted %lld bytes from %p", | |
1600 | (longlong_t)bytes_deleted, state); | |
1601 | } | |
1602 | ||
1603 | static void | |
1604 | arc_adjust(void) | |
1605 | { | |
1606 | int64_t top_sz, mru_over, arc_over, todelete; | |
1607 | ||
1608 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; | |
1609 | ||
1610 | if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { | |
1611 | int64_t toevict = | |
1612 | MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); | |
1613 | (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); | |
1614 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size; | |
1615 | } | |
1616 | ||
1617 | if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { | |
1618 | int64_t toevict = | |
1619 | MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); | |
1620 | (void) arc_evict(arc_mru, NULL, toevict, FALSE, | |
1621 | ARC_BUFC_METADATA); | |
1622 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size; | |
1623 | } | |
1624 | ||
1625 | mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; | |
1626 | ||
1627 | if (mru_over > 0) { | |
1628 | if (arc_mru_ghost->arcs_size > 0) { | |
1629 | todelete = MIN(arc_mru_ghost->arcs_size, mru_over); | |
1630 | arc_evict_ghost(arc_mru_ghost, NULL, todelete); | |
1631 | } | |
1632 | } | |
1633 | ||
1634 | if ((arc_over = arc_size - arc_c) > 0) { | |
1635 | int64_t tbl_over; | |
1636 | ||
1637 | if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { | |
1638 | int64_t toevict = | |
1639 | MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); | |
1640 | (void) arc_evict(arc_mfu, NULL, toevict, FALSE, | |
1641 | ARC_BUFC_DATA); | |
1642 | arc_over = arc_size - arc_c; | |
1643 | } | |
1644 | ||
1645 | if (arc_over > 0 && | |
1646 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { | |
1647 | int64_t toevict = | |
1648 | MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], | |
1649 | arc_over); | |
1650 | (void) arc_evict(arc_mfu, NULL, toevict, FALSE, | |
1651 | ARC_BUFC_METADATA); | |
1652 | } | |
1653 | ||
1654 | tbl_over = arc_size + arc_mru_ghost->arcs_size + | |
1655 | arc_mfu_ghost->arcs_size - arc_c * 2; | |
1656 | ||
1657 | if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { | |
1658 | todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); | |
1659 | arc_evict_ghost(arc_mfu_ghost, NULL, todelete); | |
1660 | } | |
1661 | } | |
1662 | } | |
1663 | ||
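 | /* | |
 | * Example: with arc_p = 400MB and anon + mru + metadata totalling | |
 | * 500MB, arc_adjust() first evicts up to 100MB of MRU data, then MRU | |
 | * metadata, before trimming the MRU ghost list, the MFU, and the MFU | |
 | * ghost list. | |
 | */ | |
 | ||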
1664 | static void | |
1665 | arc_do_user_evicts(void) | |
1666 | { | |
1667 | mutex_enter(&arc_eviction_mtx); | |
1668 | while (arc_eviction_list != NULL) { | |
1669 | arc_buf_t *buf = arc_eviction_list; | |
1670 | arc_eviction_list = buf->b_next; | |
1671 | buf->b_hdr = NULL; | |
1672 | mutex_exit(&arc_eviction_mtx); | |
1673 | ||
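 | /* invoke the eviction callback without holding arc_eviction_mtx */ | |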
1674 | if (buf->b_efunc != NULL) | |
1675 | VERIFY(buf->b_efunc(buf) == 0); | |
1676 | ||
1677 | buf->b_efunc = NULL; | |
1678 | buf->b_private = NULL; | |
1679 | kmem_cache_free(buf_cache, buf); | |
1680 | mutex_enter(&arc_eviction_mtx); | |
1681 | } | |
1682 | mutex_exit(&arc_eviction_mtx); | |
1683 | } | |
1684 | ||
1685 | /* | |
1686 | * Flush all *evictable* data from the cache for the given spa. | |
1687 | * NOTE: this will not touch "active" (i.e. referenced) data. | |
1688 | */ | |
1689 | void | |
1690 | arc_flush(spa_t *spa) | |
1691 | { | |
1692 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { | |
1693 | (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); | |
1694 | if (spa) | |
1695 | break; | |
1696 | } | |
1697 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { | |
1698 | (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); | |
1699 | if (spa) | |
1700 | break; | |
1701 | } | |
1702 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { | |
1703 | (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); | |
1704 | if (spa) | |
1705 | break; | |
1706 | } | |
1707 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { | |
1708 | (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); | |
1709 | if (spa) | |
1710 | break; | |
1711 | } | |
1712 | ||
1713 | arc_evict_ghost(arc_mru_ghost, spa, -1); | |
1714 | arc_evict_ghost(arc_mfu_ghost, spa, -1); | |
1715 | ||
1716 | mutex_enter(&arc_reclaim_thr_lock); | |
1717 | arc_do_user_evicts(); | |
1718 | mutex_exit(&arc_reclaim_thr_lock); | |
1719 | ASSERT(spa || arc_eviction_list == NULL); | |
1720 | } | |
1721 | ||
1722 | int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ | |
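 | /* | |
 | * For example, with arc_shrink_shift = 5 each call to arc_shrink() | |
 | * reduces the target size by arc_c >> 5, i.e. 1/32nd (~3%) of arc_c, | |
 | * or by more when 'needfree' pages are outstanding in the kernel. | |
 | */ | |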
1723 | ||
1724 | void | |
1725 | arc_shrink(void) | |
1726 | { | |
1727 | if (arc_c > arc_c_min) { | |
1728 | uint64_t to_free; | |
1729 | ||
1730 | #ifdef _KERNEL | |
1731 | to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); | |
1732 | #else | |
1733 | to_free = arc_c >> arc_shrink_shift; | |
1734 | #endif | |
1735 | if (arc_c > arc_c_min + to_free) | |
1736 | atomic_add_64(&arc_c, -to_free); | |
1737 | else | |
1738 | arc_c = arc_c_min; | |
1739 | ||
1740 | atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); | |
1741 | if (arc_c > arc_size) | |
1742 | arc_c = MAX(arc_size, arc_c_min); | |
1743 | if (arc_p > arc_c) | |
1744 | arc_p = (arc_c >> 1); | |
1745 | ASSERT(arc_c >= arc_c_min); | |
1746 | ASSERT((int64_t)arc_p >= 0); | |
1747 | } | |
1748 | ||
1749 | if (arc_size > arc_c) | |
1750 | arc_adjust(); | |
1751 | } | |
1752 | ||
1753 | static int | |
1754 | arc_reclaim_needed(void) | |
1755 | { | |
1756 | uint64_t extra; | |
1757 | ||
1758 | #ifdef _KERNEL | |
1759 | ||
1760 | if (needfree) | |
1761 | return (1); | |
1762 | ||
1763 | /* | |
1764 | * take 'desfree' extra pages, so we reclaim sooner, rather than later | |
1765 | */ | |
1766 | extra = desfree; | |
1767 | ||
1768 | /* | |
1769 | * check that we're out of range of the pageout scanner. It starts to | |
1770 | * schedule paging if freemem is less than lotsfree plus needfree. | |
1771 | * lotsfree is the high-water mark for pageout, and needfree is the | |
1772 | * number of needed free pages. We add extra pages here to make sure | |
1773 | * the scanner doesn't start up while we're freeing memory. | |
1774 | */ | |
1775 | if (freemem < lotsfree + needfree + extra) | |
1776 | return (1); | |
1777 | ||
1778 | /* | |
1779 | * check to make sure that swapfs has enough space so that anon | |
1780 | * reservations can still succeed. anon_resvmem() checks that the | |
1781 | * availrmem is greater than swapfs_minfree, and the number of reserved | |
1782 | * swap pages. We also add a bit of extra here just to prevent | |
1783 | * circumstances from getting really dire. | |
1784 | */ | |
1785 | if (availrmem < swapfs_minfree + swapfs_reserve + extra) | |
1786 | return (1); | |
1787 | ||
1788 | #if defined(__i386) | |
1789 | /* | |
1790 | * If we're on an i386 platform, it's possible that we'll exhaust the | |
1791 | * kernel heap space before we ever run out of available physical | |
1792 | * memory. Most checks of the size of the heap_area compare against | |
1793 | * tune.t_minarmem, which is the minimum available real memory that we | |
1794 | * can have in the system. However, this is generally fixed at 25 pages | |
1795 | * which is so low that it's useless. In this comparison, we seek to | |
1796 | * calculate the total heap size, and reclaim if more than 3/4ths of | |
1797 | * the heap is allocated (or, equivalently, if less than 1/4th of it | |
1798 | * is free). | |
1799 | */ | |
1800 | if (btop(vmem_size(heap_arena, VMEM_FREE)) < | |
1801 | (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) | |
1802 | return (1); | |
1803 | #endif | |
1804 | ||
1805 | #else | |
1806 | if (spa_get_random(100) == 0) | |
1807 | return (1); | |
1808 | #endif | |
1809 | return (0); | |
1810 | } | |
1811 | ||
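 | /* | |
 | * To make the i386 heap check above concrete: with a 512MB kernel | |
 | * heap, reclamation starts once less than 128MB (one quarter of | |
 | * VMEM_FREE + VMEM_ALLOC) of heap_arena remains free. | |
 | */ | |
 | ||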
1812 | static void | |
1813 | arc_kmem_reap_now(arc_reclaim_strategy_t strat) | |
1814 | { | |
1815 | size_t i; | |
1816 | kmem_cache_t *prev_cache = NULL; | |
1817 | kmem_cache_t *prev_data_cache = NULL; | |
1818 | extern kmem_cache_t *zio_buf_cache[]; | |
1819 | extern kmem_cache_t *zio_data_buf_cache[]; | |
1820 | ||
1821 | #ifdef _KERNEL | |
1822 | if (arc_meta_used >= arc_meta_limit) { | |
1823 | /* | |
1824 | * We are exceeding our meta-data cache limit. | |
1825 | * Purge some DNLC entries to release holds on meta-data. | |
1826 | */ | |
1827 | dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); | |
1828 | } | |
1829 | #if defined(__i386) | |
1830 | /* | |
1831 | * Reclaim unused memory from all kmem caches. | |
1832 | */ | |
1833 | kmem_reap(); | |
1834 | #endif | |
1835 | #endif | |
1836 | ||
1837 | /* | |
1838 | * An aggressive reclamation will shrink the cache size as well as | |
1839 | * reap free buffers from the arc kmem caches. | |
1840 | */ | |
1841 | if (strat == ARC_RECLAIM_AGGR) | |
1842 | arc_shrink(); | |
1843 | ||
1844 | for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { | |
1845 | if (zio_buf_cache[i] != prev_cache) { | |
1846 | prev_cache = zio_buf_cache[i]; | |
1847 | kmem_cache_reap_now(zio_buf_cache[i]); | |
1848 | } | |
1849 | if (zio_data_buf_cache[i] != prev_data_cache) { | |
1850 | prev_data_cache = zio_data_buf_cache[i]; | |
1851 | kmem_cache_reap_now(zio_data_buf_cache[i]); | |
1852 | } | |
1853 | } | |
1854 | kmem_cache_reap_now(buf_cache); | |
1855 | kmem_cache_reap_now(hdr_cache); | |
1856 | } | |
1857 | ||
1858 | static void | |
1859 | arc_reclaim_thread(void) | |
1860 | { | |
1861 | clock_t growtime = 0; | |
1862 | arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; | |
1863 | callb_cpr_t cpr; | |
1864 | ||
1865 | CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); | |
1866 | ||
1867 | mutex_enter(&arc_reclaim_thr_lock); | |
1868 | while (arc_thread_exit == 0) { | |
1869 | if (arc_reclaim_needed()) { | |
1870 | ||
1871 | if (arc_no_grow) { | |
1872 | if (last_reclaim == ARC_RECLAIM_CONS) { | |
1873 | last_reclaim = ARC_RECLAIM_AGGR; | |
1874 | } else { | |
1875 | last_reclaim = ARC_RECLAIM_CONS; | |
1876 | } | |
1877 | } else { | |
1878 | arc_no_grow = TRUE; | |
1879 | last_reclaim = ARC_RECLAIM_AGGR; | |
1880 | membar_producer(); | |
1881 | } | |
1882 | ||
1883 | /* reset the growth delay for every reclaim */ | |
1884 | growtime = lbolt + (arc_grow_retry * hz); | |
1885 | ||
1886 | arc_kmem_reap_now(last_reclaim); | |
1887 | ||
1888 | } else if (arc_no_grow && lbolt >= growtime) { | |
1889 | arc_no_grow = FALSE; | |
1890 | } | |
1891 | ||
1892 | if (2 * arc_c < arc_size + | |
1893 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) | |
1894 | arc_adjust(); | |
1895 | ||
1896 | if (arc_eviction_list != NULL) | |
1897 | arc_do_user_evicts(); | |
1898 | ||
1899 | /* block until signaled, or for one second, whichever comes first */ | |
1900 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
1901 | (void) cv_timedwait(&arc_reclaim_thr_cv, | |
1902 | &arc_reclaim_thr_lock, (lbolt + hz)); | |
1903 | CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); | |
1904 | } | |
1905 | ||
1906 | arc_thread_exit = 0; | |
1907 | cv_broadcast(&arc_reclaim_thr_cv); | |
1908 | CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ | |
1909 | thread_exit(); | |
1910 | } | |
1911 | ||
1912 | /* | |
1913 | * Adapt arc info given the number of bytes we are trying to add and | |
1914 | * the state that we are coming from. This function is only called | |
1915 | * when we are adding new content to the cache. | |
1916 | */ | |
1917 | static void | |
1918 | arc_adapt(int bytes, arc_state_t *state) | |
1919 | { | |
1920 | int mult; | |
1921 | ||
1922 | if (state == arc_l2c_only) | |
1923 | return; | |
1924 | ||
1925 | ASSERT(bytes > 0); | |
1926 | /* | |
1927 | * Adapt the target size of the MRU list: | |
1928 | * - if we just hit in the MRU ghost list, then increase | |
1929 | * the target size of the MRU list. | |
1930 | * - if we just hit in the MFU ghost list, then increase | |
1931 | * the target size of the MFU list by decreasing the | |
1932 | * target size of the MRU list. | |
1933 | */ | |
1934 | if (state == arc_mru_ghost) { | |
1935 | mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? | |
1936 | 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); | |
1937 | ||
1938 | arc_p = MIN(arc_c, arc_p + bytes * mult); | |
1939 | } else if (state == arc_mfu_ghost) { | |
1940 | mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? | |
1941 | 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); | |
1942 | ||
1943 | arc_p = MAX(0, (int64_t)arc_p - bytes * mult); | |
1944 | } | |
1945 | ASSERT((int64_t)arc_p >= 0); | |
1946 | ||
1947 | if (arc_reclaim_needed()) { | |
1948 | cv_signal(&arc_reclaim_thr_cv); | |
1949 | return; | |
1950 | } | |
1951 | ||
1952 | if (arc_no_grow) | |
1953 | return; | |
1954 | ||
1955 | if (arc_c >= arc_c_max) | |
1956 | return; | |
1957 | ||
1958 | /* | |
1959 | * If we're within (2 * maxblocksize) bytes of the target | |
1960 | * cache size, increment the target cache size | |
1961 | */ | |
1962 | if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { | |
1963 | atomic_add_64(&arc_c, (int64_t)bytes); | |
1964 | if (arc_c > arc_c_max) | |
1965 | arc_c = arc_c_max; | |
1966 | else if (state == arc_anon) | |
1967 | atomic_add_64(&arc_p, (int64_t)bytes); | |
1968 | if (arc_p > arc_c) | |
1969 | arc_p = arc_c; | |
1970 | } | |
1971 | ASSERT((int64_t)arc_p >= 0); | |
1972 | } | |
1973 | ||
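 | /* | |
 | * Example of the ghost-hit adaptation above: on a hit in | |
 | * arc_mru_ghost while arc_mfu_ghost holds twice as much data, | |
 | * mult = 2 and arc_p grows by 2 * bytes (capped at arc_c); a hit | |
 | * in arc_mfu_ghost shifts arc_p the other way by the symmetric | |
 | * amount (floored at zero). | |
 | */ | |
 | ||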
1974 | /* | |
1975 | * Check if the cache has reached its limits and eviction is required | |
1976 | * prior to insert. | |
1977 | */ | |
1978 | static int | |
1979 | arc_evict_needed(arc_buf_contents_t type) | |
1980 | { | |
1981 | if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) | |
1982 | return (1); | |
1983 | ||
1984 | #ifdef _KERNEL | |
1985 | /* | |
1986 | * If zio data pages are being allocated out of a separate heap segment, | |
1987 | * then enforce that the size of available vmem for this area remains | |
1988 | * above about 1/32nd free. | |
1989 | */ | |
1990 | if (type == ARC_BUFC_DATA && zio_arena != NULL && | |
1991 | vmem_size(zio_arena, VMEM_FREE) < | |
1992 | (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) | |
1993 | return (1); | |
1994 | #endif | |
1995 | ||
1996 | if (arc_reclaim_needed()) | |
1997 | return (1); | |
1998 | ||
1999 | return (arc_size > arc_c); | |
2000 | } | |
2001 | ||
2002 | /* | |
2003 | * The buffer, supplied as the first argument, needs a data block. | |
2004 | * So, if we are at cache max, determine which cache should be victimized. | |
2005 | * We have the following cases: | |
2006 | * | |
2007 | * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> | |
2008 | * In this situation if we're out of space, but the resident size of the MFU is | |
2009 | * under the limit, victimize the MFU cache to satisfy this insertion request. | |
2010 | * | |
2011 | * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> | |
2012 | * Here, we've used up all of the available space for the MRU, so we need to | |
2013 | * evict from our own cache instead. Evict from the set of resident MRU | |
2014 | * entries. | |
2015 | * | |
2016 | * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> | |
2017 | * c minus p represents the MFU space in the cache, since p is the size of the | |
2018 | * cache that is dedicated to the MRU. In this situation there's still space on | |
2019 | * the MFU side, so the MRU side needs to be victimized. | |
2020 | * | |
2021 | * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> | |
2022 | * MFU's resident set is consuming more space than it has been allotted. In | |
2023 | * this situation, we must victimize our own cache, the MFU, for this insertion. | |
2024 | */ | |
2025 | static void | |
2026 | arc_get_data_buf(arc_buf_t *buf) | |
2027 | { | |
2028 | arc_state_t *state = buf->b_hdr->b_state; | |
2029 | uint64_t size = buf->b_hdr->b_size; | |
2030 | arc_buf_contents_t type = buf->b_hdr->b_type; | |
2031 | ||
2032 | arc_adapt(size, state); | |
2033 | ||
2034 | /* | |
2035 | * We have not yet reached the maximum cache size; | |
2036 | * just allocate a new buffer. | |
2037 | */ | |
2038 | if (!arc_evict_needed(type)) { | |
2039 | if (type == ARC_BUFC_METADATA) { | |
2040 | buf->b_data = zio_buf_alloc(size); | |
2041 | arc_space_consume(size); | |
2042 | } else { | |
2043 | ASSERT(type == ARC_BUFC_DATA); | |
2044 | buf->b_data = zio_data_buf_alloc(size); | |
2045 | atomic_add_64(&arc_size, size); | |
2046 | } | |
2047 | goto out; | |
2048 | } | |
2049 | ||
2050 | /* | |
2051 | * If we are prefetching from the mfu ghost list, this buffer | |
2052 | * will end up on the mru list; so steal space from there. | |
2053 | */ | |
2054 | if (state == arc_mfu_ghost) | |
2055 | state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; | |
2056 | else if (state == arc_mru_ghost) | |
2057 | state = arc_mru; | |
2058 | ||
2059 | if (state == arc_mru || state == arc_anon) { | |
2060 | uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; | |
2061 | state = (arc_mfu->arcs_lsize[type] > 0 && | |
2062 | arc_p > mru_used) ? arc_mfu : arc_mru; | |
2063 | } else { | |
2064 | /* MFU cases */ | |
2065 | uint64_t mfu_space = arc_c - arc_p; | |
2066 | state = (arc_mru->arcs_lsize[type] > 0 && | |
2067 | mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; | |
2068 | } | |
2069 | if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { | |
2070 | if (type == ARC_BUFC_METADATA) { | |
2071 | buf->b_data = zio_buf_alloc(size); | |
2072 | arc_space_consume(size); | |
2073 | } else { | |
2074 | ASSERT(type == ARC_BUFC_DATA); | |
2075 | buf->b_data = zio_data_buf_alloc(size); | |
2076 | atomic_add_64(&arc_size, size); | |
2077 | } | |
2078 | ARCSTAT_BUMP(arcstat_recycle_miss); | |
2079 | } | |
2080 | ASSERT(buf->b_data != NULL); | |
2081 | out: | |
2082 | /* | |
2083 | * Update the state size. Note that ghost states have a | |
2084 | * "ghost size" and so don't need to be updated. | |
2085 | */ | |
2086 | if (!GHOST_STATE(buf->b_hdr->b_state)) { | |
2087 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2088 | ||
2089 | atomic_add_64(&hdr->b_state->arcs_size, size); | |
2090 | if (list_link_active(&hdr->b_arc_node)) { | |
2091 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2092 | atomic_add_64(&hdr->b_state->arcs_lsize[type], size); | |
2093 | } | |
2094 | /* | |
2095 | * If we are growing the cache, and we are adding anonymous | |
2096 | * data, and we have outgrown arc_p, update arc_p | |
2097 | */ | |
2098 | if (arc_size < arc_c && hdr->b_state == arc_anon && | |
2099 | arc_anon->arcs_size + arc_mru->arcs_size > arc_p) | |
2100 | arc_p = MIN(arc_c, arc_p + size); | |
2101 | } | |
2102 | } | |
2103 | ||
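 | /* | |
 | * Numeric example for the cases above: with arc_c = 1GB and | |
 | * arc_p = 600MB, an MRU insert while anon + mru already hold 700MB | |
 | * (> arc_p) evicts from the MRU itself (case 2); an MFU insert is | |
 | * charged against the remaining arc_c - arc_p = 400MB, so it | |
 | * victimizes the MRU only while the MFU's resident size is still | |
 | * below 400MB (case 3). | |
 | */ | |
 | ||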
2104 | /* | |
2105 | * This routine is called whenever a buffer is accessed. | |
2106 | * NOTE: the hash lock is dropped in this function. | |
2107 | */ | |
2108 | static void | |
2109 | arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) | |
2110 | { | |
2111 | ASSERT(MUTEX_HELD(hash_lock)); | |
2112 | ||
2113 | if (buf->b_state == arc_anon) { | |
2114 | /* | |
2115 | * This buffer is not in the cache, and does not | |
2116 | * appear in our "ghost" list. Add the new buffer | |
2117 | * to the MRU state. | |
2118 | */ | |
2119 | ||
2120 | ASSERT(buf->b_arc_access == 0); | |
2121 | buf->b_arc_access = lbolt; | |
2122 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2123 | arc_change_state(arc_mru, buf, hash_lock); | |
2124 | ||
2125 | } else if (buf->b_state == arc_mru) { | |
2126 | /* | |
2127 | * If this buffer is here because of a prefetch, then either: | |
2128 | * - clear the flag if this is a "referencing" read | |
2129 | * (any subsequent access will bump this into the MFU state). | |
2130 | * or | |
2131 | * - move the buffer to the head of the list if this is | |
2132 | * another prefetch (to make it less likely to be evicted). | |
2133 | */ | |
2134 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2135 | if (refcount_count(&buf->b_refcnt) == 0) { | |
2136 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2137 | } else { | |
2138 | buf->b_flags &= ~ARC_PREFETCH; | |
2139 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2140 | } | |
2141 | buf->b_arc_access = lbolt; | |
2142 | return; | |
2143 | } | |
2144 | ||
2145 | /* | |
2146 | * This buffer has been "accessed" only once so far, | |
2147 | * but it is still in the cache. If enough time has | |
2148 | * passed, move it to the MFU state. | |
2149 | */ | |
2150 | if (lbolt > buf->b_arc_access + ARC_MINTIME) { | |
2151 | /* | |
2152 | * More than 125ms have passed since we | |
2153 | * instantiated this buffer. Move it to the | |
2154 | * most frequently used state. | |
2155 | */ | |
2156 | buf->b_arc_access = lbolt; | |
2157 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2158 | arc_change_state(arc_mfu, buf, hash_lock); | |
2159 | } | |
2160 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2161 | } else if (buf->b_state == arc_mru_ghost) { | |
2162 | arc_state_t *new_state; | |
2163 | /* | |
2164 | * This buffer has been "accessed" recently, but | |
2165 | * was evicted from the cache. Move it to the | |
2166 | * MFU state. | |
2167 | */ | |
2168 | ||
2169 | if (buf->b_flags & ARC_PREFETCH) { | |
2170 | new_state = arc_mru; | |
2171 | if (refcount_count(&buf->b_refcnt) > 0) | |
2172 | buf->b_flags &= ~ARC_PREFETCH; | |
2173 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2174 | } else { | |
2175 | new_state = arc_mfu; | |
2176 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2177 | } | |
2178 | ||
2179 | buf->b_arc_access = lbolt; | |
2180 | arc_change_state(new_state, buf, hash_lock); | |
2181 | ||
2182 | ARCSTAT_BUMP(arcstat_mru_ghost_hits); | |
2183 | } else if (buf->b_state == arc_mfu) { | |
2184 | /* | |
2185 | * This buffer has been accessed more than once and is | |
2186 | * still in the cache. Keep it in the MFU state. | |
2187 | * | |
2188 | * NOTE: an add_reference() that occurred when we did | |
2189 | * the arc_read() will have kicked this off the list. | |
2190 | * If it was a prefetch, we will explicitly move it to | |
2191 | * the head of the list now. | |
2192 | */ | |
2193 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2194 | ASSERT(refcount_count(&buf->b_refcnt) == 0); | |
2195 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2196 | } | |
2197 | ARCSTAT_BUMP(arcstat_mfu_hits); | |
2198 | buf->b_arc_access = lbolt; | |
2199 | } else if (buf->b_state == arc_mfu_ghost) { | |
2200 | arc_state_t *new_state = arc_mfu; | |
2201 | /* | |
2202 | * This buffer has been accessed more than once but has | |
2203 | * been evicted from the cache. Move it back to the | |
2204 | * MFU state. | |
2205 | */ | |
2206 | ||
2207 | if (buf->b_flags & ARC_PREFETCH) { | |
2208 | /* | |
2209 | * This is a prefetch access... | |
2210 | * move this block back to the MRU state. | |
2211 | */ | |
2212 | ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); | |
2213 | new_state = arc_mru; | |
2214 | } | |
2215 | ||
2216 | buf->b_arc_access = lbolt; | |
2217 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2218 | arc_change_state(new_state, buf, hash_lock); | |
2219 | ||
2220 | ARCSTAT_BUMP(arcstat_mfu_ghost_hits); | |
2221 | } else if (buf->b_state == arc_l2c_only) { | |
2222 | /* | |
2223 | * This buffer is on the 2nd Level ARC. | |
2224 | */ | |
2225 | ||
2226 | buf->b_arc_access = lbolt; | |
2227 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2228 | arc_change_state(arc_mfu, buf, hash_lock); | |
2229 | } else { | |
2230 | ASSERT(!"invalid arc state"); | |
2231 | } | |
2232 | } | |
2233 | ||
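 | /* | |
 | * Promotion timing example: two reads of a block within ARC_MINTIME | |
 | * of each other count as a single logical use, so the block stays | |
 | * in the MRU; a further read after that window promotes it to the | |
 | * MFU. | |
 | */ | |
 | ||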
2234 | /* a generic arc_done_func_t which you can use */ | |
2235 | /* ARGSUSED */ | |
2236 | void | |
2237 | arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2238 | { | |
2239 | bcopy(buf->b_data, arg, buf->b_hdr->b_size); | |
2240 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2241 | } | |
2242 | ||
2243 | /* a generic arc_done_func_t */ | |
2244 | void | |
2245 | arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2246 | { | |
2247 | arc_buf_t **bufp = arg; | |
2248 | if (zio && zio->io_error) { | |
2249 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2250 | *bufp = NULL; | |
2251 | } else { | |
2252 | *bufp = buf; | |
2253 | } | |
2254 | } | |
2255 | ||
2256 | static void | |
2257 | arc_read_done(zio_t *zio) | |
2258 | { | |
2259 | arc_buf_hdr_t *hdr, *found; | |
2260 | arc_buf_t *buf; | |
2261 | arc_buf_t *abuf; /* buffer we're assigning to callback */ | |
2262 | kmutex_t *hash_lock; | |
2263 | arc_callback_t *callback_list, *acb; | |
2264 | int freeable = FALSE; | |
2265 | ||
2266 | buf = zio->io_private; | |
2267 | hdr = buf->b_hdr; | |
2268 | ||
2269 | /* | |
2270 | * The hdr was inserted into hash-table and removed from lists | |
2271 | * prior to starting I/O. We should find this header, since | |
2272 | * it's in the hash table, and it should be legit since it's | |
2273 | * not possible to evict it during the I/O. The only possible | |
2274 | * reason for it not to be found is if we were freed during the | |
2275 | * read. | |
2276 | */ | |
2277 | found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, | |
2278 | &hash_lock); | |
2279 | ||
2280 | ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || | |
2281 | (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || | |
2282 | (found == hdr && HDR_L2_READING(hdr))); | |
2283 | ||
2284 | hdr->b_flags &= ~(ARC_L2_READING|ARC_L2_EVICTED); | |
2285 | if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) | |
2286 | hdr->b_flags |= ARC_DONT_L2CACHE; | |
2287 | ||
2288 | /* byteswap if necessary */ | |
2289 | callback_list = hdr->b_acb; | |
2290 | ASSERT(callback_list != NULL); | |
2291 | if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) | |
2292 | callback_list->acb_byteswap(buf->b_data, hdr->b_size); | |
2293 | ||
2294 | arc_cksum_compute(buf, B_FALSE); | |
2295 | ||
2296 | /* create copies of the data buffer for the callers */ | |
2297 | abuf = buf; | |
2298 | for (acb = callback_list; acb; acb = acb->acb_next) { | |
2299 | if (acb->acb_done) { | |
2300 | if (abuf == NULL) | |
2301 | abuf = arc_buf_clone(buf); | |
2302 | acb->acb_buf = abuf; | |
2303 | abuf = NULL; | |
2304 | } | |
2305 | } | |
2306 | hdr->b_acb = NULL; | |
2307 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2308 | ASSERT(!HDR_BUF_AVAILABLE(hdr)); | |
2309 | if (abuf == buf) | |
2310 | hdr->b_flags |= ARC_BUF_AVAILABLE; | |
2311 | ||
2312 | ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); | |
2313 | ||
2314 | if (zio->io_error != 0) { | |
2315 | hdr->b_flags |= ARC_IO_ERROR; | |
2316 | if (hdr->b_state != arc_anon) | |
2317 | arc_change_state(arc_anon, hdr, hash_lock); | |
2318 | if (HDR_IN_HASH_TABLE(hdr)) | |
2319 | buf_hash_remove(hdr); | |
2320 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
2321 | /* convert checksum errors into IO errors */ | |
2322 | if (zio->io_error == ECKSUM) | |
2323 | zio->io_error = EIO; | |
2324 | } | |
2325 | ||
2326 | /* | |
2327 | * Broadcast before we drop the hash_lock to avoid the possibility | |
2328 | * that the hdr (and hence the cv) might be freed before we get to | |
2329 | * the cv_broadcast(). | |
2330 | */ | |
2331 | cv_broadcast(&hdr->b_cv); | |
2332 | ||
2333 | if (hash_lock) { | |
2334 | /* | |
2335 | * Only call arc_access on anonymous buffers. This is because | |
2336 | * if we've issued an I/O for an evicted buffer, we've already | |
2337 | * called arc_access (to prevent any simultaneous readers from | |
2338 | * getting confused). | |
2339 | */ | |
2340 | if (zio->io_error == 0 && hdr->b_state == arc_anon) | |
2341 | arc_access(hdr, hash_lock); | |
2342 | mutex_exit(hash_lock); | |
2343 | } else { | |
2344 | /* | |
2345 | * This block was freed while we waited for the read to | |
2346 | * complete. It has been removed from the hash table and | |
2347 | * moved to the anonymous state (so that it won't show up | |
2348 | * in the cache). | |
2349 | */ | |
2350 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
2351 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
2352 | } | |
2353 | ||
2354 | /* execute each callback and free its structure */ | |
2355 | while ((acb = callback_list) != NULL) { | |
2356 | if (acb->acb_done) | |
2357 | acb->acb_done(zio, acb->acb_buf, acb->acb_private); | |
2358 | ||
2359 | if (acb->acb_zio_dummy != NULL) { | |
2360 | acb->acb_zio_dummy->io_error = zio->io_error; | |
2361 | zio_nowait(acb->acb_zio_dummy); | |
2362 | } | |
2363 | ||
2364 | callback_list = acb->acb_next; | |
2365 | kmem_free(acb, sizeof (arc_callback_t)); | |
2366 | } | |
2367 | ||
2368 | if (freeable) | |
2369 | arc_hdr_destroy(hdr); | |
2370 | } | |
2371 | ||
2372 | /* | |
2373 | * "Read" the block block at the specified DVA (in bp) via the | |
2374 | * cache. If the block is found in the cache, invoke the provided | |
2375 | * callback immediately and return. Note that the `zio' parameter | |
2376 | * in the callback will be NULL in this case, since no IO was | |
2377 | * required. If the block is not in the cache pass the read request | |
2378 | * on to the spa with a substitute callback function, so that the | |
2379 | * requested block will be added to the cache. | |
2380 | * | |
2381 | * If a read request arrives for a block that has a read in-progress, | |
2382 | * either wait for the in-progress read to complete (and return the | |
2383 | * results); or, if this is a read with a "done" func, add a record | |
2384 | * to the read to invoke the "done" func when the read completes, | |
2385 | * and return; or just return. | |
2386 | * | |
2387 | * arc_read_done() will invoke all the requested "done" functions | |
2388 | * for readers of this block. | |
2389 | */ | |
2390 | int | |
2391 | arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, | |
2392 | arc_done_func_t *done, void *private, int priority, int flags, | |
2393 | uint32_t *arc_flags, zbookmark_t *zb) | |
2394 | { | |
2395 | arc_buf_hdr_t *hdr; | |
2396 | arc_buf_t *buf; | |
2397 | kmutex_t *hash_lock; | |
2398 | zio_t *rzio; | |
2399 | ||
2400 | top: | |
2401 | hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); | |
2402 | if (hdr && hdr->b_datacnt > 0) { | |
2403 | ||
2404 | *arc_flags |= ARC_CACHED; | |
2405 | ||
2406 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
2407 | ||
2408 | if (*arc_flags & ARC_WAIT) { | |
2409 | cv_wait(&hdr->b_cv, hash_lock); | |
2410 | mutex_exit(hash_lock); | |
2411 | goto top; | |
2412 | } | |
2413 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2414 | ||
2415 | if (done) { | |
2416 | arc_callback_t *acb = NULL; | |
2417 | ||
2418 | acb = kmem_zalloc(sizeof (arc_callback_t), | |
2419 | KM_SLEEP); | |
2420 | acb->acb_done = done; | |
2421 | acb->acb_private = private; | |
2422 | acb->acb_byteswap = swap; | |
2423 | if (pio != NULL) | |
2424 | acb->acb_zio_dummy = zio_null(pio, | |
2425 | spa, NULL, NULL, flags); | |
2426 | ||
2427 | ASSERT(acb->acb_done != NULL); | |
2428 | acb->acb_next = hdr->b_acb; | |
2429 | hdr->b_acb = acb; | |
2430 | add_reference(hdr, hash_lock, private); | |
2431 | mutex_exit(hash_lock); | |
2432 | return (0); | |
2433 | } | |
2434 | mutex_exit(hash_lock); | |
2435 | return (0); | |
2436 | } | |
2437 | ||
2438 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2439 | ||
2440 | if (done) { | |
2441 | add_reference(hdr, hash_lock, private); | |
2442 | /* | |
2443 | * If this block is already in use, create a new | |
2444 | * copy of the data so that we will be guaranteed | |
2445 | * that arc_release() will always succeed. | |
2446 | */ | |
2447 | buf = hdr->b_buf; | |
2448 | ASSERT(buf); | |
2449 | ASSERT(buf->b_data); | |
2450 | if (HDR_BUF_AVAILABLE(hdr)) { | |
2451 | ASSERT(buf->b_efunc == NULL); | |
2452 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2453 | } else { | |
2454 | buf = arc_buf_clone(buf); | |
2455 | } | |
2456 | } else if (*arc_flags & ARC_PREFETCH && | |
2457 | refcount_count(&hdr->b_refcnt) == 0) { | |
2458 | hdr->b_flags |= ARC_PREFETCH; | |
2459 | } | |
2460 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); | |
2461 | arc_access(hdr, hash_lock); | |
2462 | mutex_exit(hash_lock); | |
2463 | ARCSTAT_BUMP(arcstat_hits); | |
2464 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2465 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2466 | data, metadata, hits); | |
2467 | ||
2468 | if (done) | |
2469 | done(NULL, buf, private); | |
2470 | } else { | |
2471 | uint64_t size = BP_GET_LSIZE(bp); | |
2472 | arc_callback_t *acb; | |
2473 | ||
2474 | if (hdr == NULL) { | |
2475 | /* this block is not in the cache */ | |
2476 | arc_buf_hdr_t *exists; | |
2477 | arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); | |
2478 | buf = arc_buf_alloc(spa, size, private, type); | |
2479 | hdr = buf->b_hdr; | |
2480 | hdr->b_dva = *BP_IDENTITY(bp); | |
2481 | hdr->b_birth = bp->blk_birth; | |
2482 | hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; | |
2483 | exists = buf_hash_insert(hdr, &hash_lock); | |
2484 | if (exists) { | |
2485 | /* somebody beat us to the hash insert */ | |
2486 | mutex_exit(hash_lock); | |
2487 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
2488 | hdr->b_birth = 0; | |
2489 | hdr->b_cksum0 = 0; | |
2490 | (void) arc_buf_remove_ref(buf, private); | |
2491 | goto top; /* restart the IO request */ | |
2492 | } | |
2493 | /* if this is a prefetch, we don't have a reference */ | |
2494 | if (*arc_flags & ARC_PREFETCH) { | |
2495 | (void) remove_reference(hdr, hash_lock, | |
2496 | private); | |
2497 | hdr->b_flags |= ARC_PREFETCH; | |
2498 | } | |
2499 | if (BP_GET_LEVEL(bp) > 0) | |
2500 | hdr->b_flags |= ARC_INDIRECT; | |
2501 | } else { | |
2502 | /* this block is in the ghost cache */ | |
2503 | ASSERT(GHOST_STATE(hdr->b_state)); | |
2504 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2505 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); | |
2506 | ASSERT(hdr->b_buf == NULL); | |
2507 | ||
2508 | /* if this is a prefetch, we don't have a reference */ | |
2509 | if (*arc_flags & ARC_PREFETCH) | |
2510 | hdr->b_flags |= ARC_PREFETCH; | |
2511 | else | |
2512 | add_reference(hdr, hash_lock, private); | |
2513 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); | |
2514 | buf->b_hdr = hdr; | |
2515 | buf->b_data = NULL; | |
2516 | buf->b_efunc = NULL; | |
2517 | buf->b_private = NULL; | |
2518 | buf->b_next = NULL; | |
2519 | hdr->b_buf = buf; | |
2520 | arc_get_data_buf(buf); | |
2521 | ASSERT(hdr->b_datacnt == 0); | |
2522 | hdr->b_datacnt = 1; | |
2523 | ||
2524 | } | |
2525 | ||
2526 | acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); | |
2527 | acb->acb_done = done; | |
2528 | acb->acb_private = private; | |
2529 | acb->acb_byteswap = swap; | |
2530 | ||
2531 | ASSERT(hdr->b_acb == NULL); | |
2532 | hdr->b_acb = acb; | |
2533 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
2534 | ||
2535 | /* | |
2536 | * If the buffer has been evicted, migrate it to a present state | |
2537 | * before issuing the I/O. Once we drop the hash-table lock, | |
2538 | * the header will be marked as I/O in progress and have an | |
2539 | * attached buffer. At this point, anybody who finds this | |
2540 | * buffer ought to notice that it's legit but has a pending I/O. | |
2541 | */ | |
2542 | ||
2543 | if (GHOST_STATE(hdr->b_state)) | |
2544 | arc_access(hdr, hash_lock); | |
2545 | ||
2546 | ASSERT3U(hdr->b_size, ==, size); | |
2547 | DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, | |
2548 | zbookmark_t *, zb); | |
2549 | ARCSTAT_BUMP(arcstat_misses); | |
2550 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2551 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2552 | data, metadata, misses); | |
2553 | ||
2554 | if (l2arc_ndev != 0) { | |
2555 | /* | |
2556 | * Read from the L2ARC if the following are true: | |
2557 | * 1. This buffer has L2ARC metadata. | |
2558 | * 2. This buffer isn't currently writing to the L2ARC. | |
2559 | */ | |
2560 | if (hdr->b_l2hdr != NULL && !HDR_L2_WRITING(hdr)) { | |
2561 | vdev_t *vd = hdr->b_l2hdr->b_dev->l2ad_vdev; | |
2562 | daddr_t addr = hdr->b_l2hdr->b_daddr; | |
2563 | l2arc_read_callback_t *cb; | |
2564 | ||
2565 | DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); | |
2566 | ARCSTAT_BUMP(arcstat_l2_hits); | |
2567 | ||
2568 | hdr->b_flags |= ARC_L2_READING; | |
2569 | mutex_exit(hash_lock); | |
2570 | ||
2571 | cb = kmem_zalloc(sizeof (l2arc_read_callback_t), | |
2572 | KM_SLEEP); | |
2573 | cb->l2rcb_buf = buf; | |
2574 | cb->l2rcb_spa = spa; | |
2575 | cb->l2rcb_bp = *bp; | |
2576 | cb->l2rcb_zb = *zb; | |
2577 | cb->l2rcb_flags = flags; | |
2578 | ||
2579 | /* | |
2580 | * l2arc read. | |
2581 | */ | |
2582 | rzio = zio_read_phys(pio, vd, addr, size, | |
2583 | buf->b_data, ZIO_CHECKSUM_OFF, | |
2584 | l2arc_read_done, cb, priority, | |
2585 | flags | ZIO_FLAG_DONT_CACHE, B_FALSE); | |
2586 | DTRACE_PROBE2(l2arc__read, vdev_t *, vd, | |
2587 | zio_t *, rzio); | |
2588 | ||
2589 | if (*arc_flags & ARC_WAIT) | |
2590 | return (zio_wait(rzio)); | |
2591 | ||
2592 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2593 | zio_nowait(rzio); | |
2594 | return (0); | |
2595 | } else { | |
2596 | DTRACE_PROBE1(l2arc__miss, | |
2597 | arc_buf_hdr_t *, hdr); | |
2598 | ARCSTAT_BUMP(arcstat_l2_misses); | |
2599 | if (HDR_L2_WRITING(hdr)) | |
2600 | ARCSTAT_BUMP(arcstat_l2_rw_clash); | |
2601 | } | |
2602 | } | |
2603 | mutex_exit(hash_lock); | |
2604 | ||
2605 | rzio = zio_read(pio, spa, bp, buf->b_data, size, | |
2606 | arc_read_done, buf, priority, flags, zb); | |
2607 | ||
2608 | if (*arc_flags & ARC_WAIT) | |
2609 | return (zio_wait(rzio)); | |
2610 | ||
2611 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2612 | zio_nowait(rzio); | |
2613 | } | |
2614 | return (0); | |
2615 | } | |
2616 | ||
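 | /* | |
 | * Hypothetical caller sketch: a synchronous, cacheable read of the | |
 | * block at 'bp', copying the result into 'dest' (a buffer of the | |
 | * block's logical size) via arc_bcopy_func() defined above: | |
 | * | |
 | *	uint32_t aflags = ARC_WAIT; | |
 | *	(void) arc_read(NULL, spa, bp, byteswap_uint64_array, | |
 | *	    arc_bcopy_func, dest, ZIO_PRIORITY_SYNC_READ, | |
 | *	    ZIO_FLAG_CANFAIL, &aflags, &zb); | |
 | */ | |
 | ||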
2617 | /* | |
2618 | * arc_read() variant to support pool traversal. If the block is already | |
2619 | * in the ARC, make a copy of it; otherwise, the caller will do the I/O. | |
2620 | * The idea is that we don't want pool traversal filling up memory, but | |
2621 | * if the ARC already has the data anyway, we shouldn't pay for the I/O. | |
2622 | */ | |
2623 | int | |
2624 | arc_tryread(spa_t *spa, blkptr_t *bp, void *data) | |
2625 | { | |
2626 | arc_buf_hdr_t *hdr; | |
2627 | kmutex_t *hash_mtx; | |
2628 | int rc = 0; | |
2629 | ||
2630 | hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); | |
2631 | ||
2632 | if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { | |
2633 | arc_buf_t *buf = hdr->b_buf; | |
2634 | ||
2635 | ASSERT(buf); | |
2636 | while (buf->b_data == NULL) { | |
2637 | buf = buf->b_next; | |
2638 | ASSERT(buf); | |
2639 | } | |
2640 | bcopy(buf->b_data, data, hdr->b_size); | |
2641 | } else { | |
2642 | rc = ENOENT; | |
2643 | } | |
2644 | ||
2645 | if (hash_mtx) | |
2646 | mutex_exit(hash_mtx); | |
2647 | ||
2648 | return (rc); | |
2649 | } | |
2650 | ||
2651 | void | |
2652 | arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) | |
2653 | { | |
2654 | ASSERT(buf->b_hdr != NULL); | |
2655 | ASSERT(buf->b_hdr->b_state != arc_anon); | |
2656 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); | |
2657 | buf->b_efunc = func; | |
2658 | buf->b_private = private; | |
2659 | } | |
2660 | ||
2661 | /* | |
2662 | * This is used by the DMU to let the ARC know that a buffer is | |
2663 | * being evicted, so the ARC should clean up. If this arc buf | |
2664 | * is not yet in the evicted state, it will be put there. | |
2665 | */ | |
2666 | int | |
2667 | arc_buf_evict(arc_buf_t *buf) | |
2668 | { | |
2669 | arc_buf_hdr_t *hdr; | |
2670 | kmutex_t *hash_lock; | |
2671 | arc_buf_t **bufp; | |
2672 | ||
2673 | mutex_enter(&arc_eviction_mtx); | |
2674 | hdr = buf->b_hdr; | |
2675 | if (hdr == NULL) { | |
2676 | /* | |
2677 | * We are in arc_do_user_evicts(). | |
2678 | */ | |
2679 | ASSERT(buf->b_data == NULL); | |
2680 | mutex_exit(&arc_eviction_mtx); | |
2681 | return (0); | |
2682 | } | |
2683 | hash_lock = HDR_LOCK(hdr); | |
2684 | mutex_exit(&arc_eviction_mtx); | |
2685 | ||
2686 | mutex_enter(hash_lock); | |
2687 | ||
2688 | if (buf->b_data == NULL) { | |
2689 | /* | |
2690 | * We are on the eviction list. | |
2691 | */ | |
2692 | mutex_exit(hash_lock); | |
2693 | mutex_enter(&arc_eviction_mtx); | |
2694 | if (buf->b_hdr == NULL) { | |
2695 | /* | |
2696 | * We are already in arc_do_user_evicts(). | |
2697 | */ | |
2698 | mutex_exit(&arc_eviction_mtx); | |
2699 | return (0); | |
2700 | } else { | |
2701 | arc_buf_t copy = *buf; /* structure assignment */ | |
2702 | /* | |
2703 | * Process this buffer now | |
2704 | * but let arc_do_user_evicts() do the reaping. | |
2705 | */ | |
2706 | buf->b_efunc = NULL; | |
2707 | mutex_exit(&arc_eviction_mtx); | |
2708 | VERIFY(copy.b_efunc(&copy) == 0); | |
2709 | return (1); | |
2710 | } | |
2711 | } | |
2712 | ||
2713 | ASSERT(buf->b_hdr == hdr); | |
2714 | ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); | |
2715 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2716 | ||
2717 | /* | |
2718 | * Pull this buffer off of the hdr | |
2719 | */ | |
2720 | bufp = &hdr->b_buf; | |
2721 | while (*bufp != buf) | |
2722 | bufp = &(*bufp)->b_next; | |
2723 | *bufp = buf->b_next; | |
2724 | ||
2725 | ASSERT(buf->b_data != NULL); | |
2726 | arc_buf_destroy(buf, FALSE, FALSE); | |
2727 | ||
2728 | if (hdr->b_datacnt == 0) { | |
2729 | arc_state_t *old_state = hdr->b_state; | |
2730 | arc_state_t *evicted_state; | |
2731 | ||
2732 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2733 | ||
2734 | evicted_state = | |
2735 | (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
2736 | ||
2737 | mutex_enter(&old_state->arcs_mtx); | |
2738 | mutex_enter(&evicted_state->arcs_mtx); | |
2739 | ||
2740 | arc_change_state(evicted_state, hdr, hash_lock); | |
2741 | ASSERT(HDR_IN_HASH_TABLE(hdr)); | |
2742 | hdr->b_flags |= ARC_IN_HASH_TABLE; | |
2743 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2744 | ||
2745 | mutex_exit(&evicted_state->arcs_mtx); | |
2746 | mutex_exit(&old_state->arcs_mtx); | |
2747 | } | |
2748 | mutex_exit(hash_lock); | |
2749 | ||
2750 | VERIFY(buf->b_efunc(buf) == 0); | |
2751 | buf->b_efunc = NULL; | |
2752 | buf->b_private = NULL; | |
2753 | buf->b_hdr = NULL; | |
2754 | kmem_cache_free(buf_cache, buf); | |
2755 | return (1); | |
2756 | } | |
2757 | ||
2758 | /* | |
2759 | * Release this buffer from the cache. This must be done | |
2760 | * after a read and prior to modifying the buffer contents. | |
2761 | * If the buffer has more than one reference, we must make | |
2762 | * a new hdr for the buffer. | |
2763 | */ | |
2764 | void | |
2765 | arc_release(arc_buf_t *buf, void *tag) | |
2766 | { | |
2767 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2768 | kmutex_t *hash_lock = HDR_LOCK(hdr); | |
2769 | l2arc_buf_hdr_t *l2hdr = NULL; | |
2770 | uint64_t buf_size; | |
2771 | ||
2772 | /* this buffer is not on any list */ | |
2773 | ASSERT(refcount_count(&hdr->b_refcnt) > 0); | |
2774 | ||
2775 | if (hdr->b_state == arc_anon) { | |
2776 | /* this buffer is already released */ | |
2777 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); | |
2778 | ASSERT(BUF_EMPTY(hdr)); | |
2779 | ASSERT(buf->b_efunc == NULL); | |
2780 | arc_buf_thaw(buf); | |
2781 | return; | |
2782 | } | |
2783 | ||
2784 | mutex_enter(hash_lock); | |
2785 | ||
2786 | /* | |
2787 | * Do we have more than one buf? | |
2788 | */ | |
2789 | if (hdr->b_buf != buf || buf->b_next != NULL) { | |
2790 | arc_buf_hdr_t *nhdr; | |
2791 | arc_buf_t **bufp; | |
2792 | uint64_t blksz = hdr->b_size; | |
2793 | spa_t *spa = hdr->b_spa; | |
2794 | arc_buf_contents_t type = hdr->b_type; | |
2795 | uint32_t flags = hdr->b_flags; | |
2796 | ||
2797 | ASSERT(hdr->b_datacnt > 1); | |
2798 | /* | |
2799 | * Pull the data off of this buf and attach it to | |
2800 | * a new anonymous buf. | |
2801 | */ | |
2802 | (void) remove_reference(hdr, hash_lock, tag); | |
2803 | bufp = &hdr->b_buf; | |
2804 | while (*bufp != buf) | |
2805 | bufp = &(*bufp)->b_next; | |
2806 | *bufp = (*bufp)->b_next; | |
2807 | buf->b_next = NULL; | |
2808 | ||
2809 | ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); | |
2810 | atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); | |
2811 | if (refcount_is_zero(&hdr->b_refcnt)) { | |
2812 | uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; | |
2813 | ASSERT3U(*size, >=, hdr->b_size); | |
2814 | atomic_add_64(size, -hdr->b_size); | |
2815 | } | |
2816 | hdr->b_datacnt -= 1; | |
2817 | if (hdr->b_l2hdr != NULL) { | |
2818 | mutex_enter(&l2arc_buflist_mtx); | |
2819 | l2hdr = hdr->b_l2hdr; | |
2820 | hdr->b_l2hdr = NULL; | |
2821 | buf_size = hdr->b_size; | |
2822 | } | |
2823 | arc_cksum_verify(buf); | |
2824 | ||
2825 | mutex_exit(hash_lock); | |
2826 | ||
2827 | nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
2828 | nhdr->b_size = blksz; | |
2829 | nhdr->b_spa = spa; | |
2830 | nhdr->b_type = type; | |
2831 | nhdr->b_buf = buf; | |
2832 | nhdr->b_state = arc_anon; | |
2833 | nhdr->b_arc_access = 0; | |
2834 | nhdr->b_flags = flags & ARC_L2_WRITING; | |
2835 | nhdr->b_l2hdr = NULL; | |
2836 | nhdr->b_datacnt = 1; | |
2837 | nhdr->b_freeze_cksum = NULL; | |
2838 | (void) refcount_add(&nhdr->b_refcnt, tag); | |
2839 | buf->b_hdr = nhdr; | |
2840 | atomic_add_64(&arc_anon->arcs_size, blksz); | |
2841 | } else { | |
2842 | ASSERT(refcount_count(&hdr->b_refcnt) == 1); | |
2843 | ASSERT(!list_link_active(&hdr->b_arc_node)); | |
2844 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2845 | arc_change_state(arc_anon, hdr, hash_lock); | |
2846 | hdr->b_arc_access = 0; | |
2847 | if (hdr->b_l2hdr != NULL) { | |
2848 | mutex_enter(&l2arc_buflist_mtx); | |
2849 | l2hdr = hdr->b_l2hdr; | |
2850 | hdr->b_l2hdr = NULL; | |
2851 | buf_size = hdr->b_size; | |
2852 | } | |
2853 | mutex_exit(hash_lock); | |
2854 | ||
2855 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
2856 | hdr->b_birth = 0; | |
2857 | hdr->b_cksum0 = 0; | |
2858 | arc_buf_thaw(buf); | |
2859 | } | |
2860 | buf->b_efunc = NULL; | |
2861 | buf->b_private = NULL; | |
2862 | ||
2863 | if (l2hdr) { | |
2864 | list_remove(l2hdr->b_dev->l2ad_buflist, hdr); | |
2865 | kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); | |
2866 | ARCSTAT_INCR(arcstat_l2_size, -buf_size); | |
2867 | } | |
2868 | if (MUTEX_HELD(&l2arc_buflist_mtx)) | |
2869 | mutex_exit(&l2arc_buflist_mtx); | |
2870 | } | |
2871 | ||
2872 | int | |
2873 | arc_released(arc_buf_t *buf) | |
2874 | { | |
2875 | return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); | |
2876 | } | |
2877 | ||
2878 | int | |
2879 | arc_has_callback(arc_buf_t *buf) | |
2880 | { | |
2881 | return (buf->b_efunc != NULL); | |
2882 | } | |
2883 | ||
2884 | #ifdef ZFS_DEBUG | |
2885 | int | |
2886 | arc_referenced(arc_buf_t *buf) | |
2887 | { | |
2888 | return (refcount_count(&buf->b_hdr->b_refcnt)); | |
2889 | } | |
2890 | #endif | |
2891 | ||
2892 | static void | |
2893 | arc_write_ready(zio_t *zio) | |
2894 | { | |
2895 | arc_write_callback_t *callback = zio->io_private; | |
2896 | arc_buf_t *buf = callback->awcb_buf; | |
2897 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2898 | ||
2899 | if (zio->io_error == 0 && callback->awcb_ready) { | |
2900 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); | |
2901 | callback->awcb_ready(zio, buf, callback->awcb_private); | |
2902 | } | |
2903 | /* | |
2904 | * If the IO is already in progress, then this is a re-write | |
2905 | * attempt, so we need to thaw and re-compute the cksum. It is | |
2906 | * the responsibility of the callback to handle the freeing | |
2907 | * and accounting for any re-write attempt. If we don't have a | |
2908 | * callback registered then simply free the block here. | |
2909 | */ | |
2910 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
2911 | if (!BP_IS_HOLE(&zio->io_bp_orig) && | |
2912 | callback->awcb_ready == NULL) { | |
2913 | zio_nowait(zio_free(zio, zio->io_spa, zio->io_txg, | |
2914 | &zio->io_bp_orig, NULL, NULL)); | |
2915 | } | |
2916 | mutex_enter(&hdr->b_freeze_lock); | |
2917 | if (hdr->b_freeze_cksum != NULL) { | |
2918 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
2919 | hdr->b_freeze_cksum = NULL; | |
2920 | } | |
2921 | mutex_exit(&hdr->b_freeze_lock); | |
2922 | } | |
2923 | arc_cksum_compute(buf, B_FALSE); | |
2924 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
2925 | } | |
2926 | ||
2927 | static void | |
2928 | arc_write_done(zio_t *zio) | |
2929 | { | |
2930 | arc_write_callback_t *callback = zio->io_private; | |
2931 | arc_buf_t *buf = callback->awcb_buf; | |
2932 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2933 | ||
2934 | hdr->b_acb = NULL; | |
2935 | ||
2936 | /* this buffer is on no lists and is not in the hash table */ | |
2937 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
2938 | ||
2939 | hdr->b_dva = *BP_IDENTITY(zio->io_bp); | |
2940 | hdr->b_birth = zio->io_bp->blk_birth; | |
2941 | hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; | |
2942 | /* | |
2943 | * If the block to be written was all-zero, we may have | |
2944 | * compressed it away. In this case no write was performed | |
2945 | * so there will be no dva/birth-date/checksum. The buffer | |
2946 | * must therefore remain anonymous (and uncached). | |
2947 | */ | |
2948 | if (!BUF_EMPTY(hdr)) { | |
2949 | arc_buf_hdr_t *exists; | |
2950 | kmutex_t *hash_lock; | |
2951 | ||
2952 | arc_cksum_verify(buf); | |
2953 | ||
2954 | exists = buf_hash_insert(hdr, &hash_lock); | |
2955 | if (exists) { | |
2956 | /* | |
2957 | * This can only happen if we overwrite for | |
2958 | * sync-to-convergence, because we remove | |
2959 | * buffers from the hash table when we arc_free(). | |
2960 | */ | |
2961 | ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), | |
2962 | BP_IDENTITY(zio->io_bp))); | |
2963 | ASSERT3U(zio->io_bp_orig.blk_birth, ==, | |
2964 | zio->io_bp->blk_birth); | |
2965 | ||
2966 | ASSERT(refcount_is_zero(&exists->b_refcnt)); | |
2967 | arc_change_state(arc_anon, exists, hash_lock); | |
2968 | mutex_exit(hash_lock); | |
2969 | arc_hdr_destroy(exists); | |
2970 | exists = buf_hash_insert(hdr, &hash_lock); | |
2971 | ASSERT3P(exists, ==, NULL); | |
2972 | } | |
2973 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2974 | arc_access(hdr, hash_lock); | |
2975 | mutex_exit(hash_lock); | |
2976 | } else if (callback->awcb_done == NULL) { | |
2977 | int destroy_hdr; | |
2978 | /* | |
2979 | * This is an anonymous buffer with no user callback, | |
2980 | * destroy it if there are no active references. | |
2981 | */ | |
2982 | mutex_enter(&arc_eviction_mtx); | |
2983 | destroy_hdr = refcount_is_zero(&hdr->b_refcnt); | |
2984 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2985 | mutex_exit(&arc_eviction_mtx); | |
2986 | if (destroy_hdr) | |
2987 | arc_hdr_destroy(hdr); | |
2988 | } else { | |
2989 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2990 | } | |
2991 | ||
2992 | if (callback->awcb_done) { | |
2993 | ASSERT(!refcount_is_zero(&hdr->b_refcnt)); | |
2994 | callback->awcb_done(zio, buf, callback->awcb_private); | |
2995 | } | |
2996 | ||
2997 | kmem_free(callback, sizeof (arc_write_callback_t)); | |
2998 | } | |
2999 | ||
3000 | zio_t * | |
3001 | arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, | |
3002 | uint64_t txg, blkptr_t *bp, arc_buf_t *buf, | |
3003 | arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, | |
3004 | int flags, zbookmark_t *zb) | |
3005 | { | |
3006 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3007 | arc_write_callback_t *callback; | |
3008 | zio_t *zio; | |
3009 | ||
3010 | /* this is a private buffer - no locking required */ | |
3011 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
3012 | ASSERT(BUF_EMPTY(hdr)); | |
3013 | ASSERT(!HDR_IO_ERROR(hdr)); | |
3014 | ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); | |
3015 | ASSERT(hdr->b_acb == 0); | |
3016 | callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); | |
3017 | callback->awcb_ready = ready; | |
3018 | callback->awcb_done = done; | |
3019 | callback->awcb_private = private; | |
3020 | callback->awcb_buf = buf; | |
3021 | zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, | |
3022 | buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, | |
3023 | priority, flags, zb); | |
3024 | ||
3025 | return (zio); | |
3026 | } | |
3027 | ||
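 | /* | |
 | * Hypothetical caller sketch: write out a released buffer with two | |
 | * copies and wait for the I/O (no ready/done callbacks): | |
 | * | |
 | *	zio = arc_write(pio, spa, ZIO_CHECKSUM_FLETCHER_2, | |
 | *	    ZIO_COMPRESS_LZJB, 2, txg, bp, buf, NULL, NULL, NULL, | |
 | *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); | |
 | *	(void) zio_wait(zio); | |
 | */ | |
 | ||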
3028 | int | |
3029 | arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, | |
3030 | zio_done_func_t *done, void *private, uint32_t arc_flags) | |
3031 | { | |
3032 | arc_buf_hdr_t *ab; | |
3033 | kmutex_t *hash_lock; | |
3034 | zio_t *zio; | |
3035 | ||
3036 | /* | |
3037 | * If this buffer is in the cache, release it, so it | |
3038 | * can be re-used. | |
3039 | */ | |
3040 | ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); | |
3041 | if (ab != NULL) { | |
3042 | /* | |
3043 | * The checksum of blocks to free is not always | |
3044 | * preserved (e.g. on the deadlist). However, if it is | |
3045 | * nonzero, it should match what we have in the cache. | |
3046 | */ | |
3047 | ASSERT(bp->blk_cksum.zc_word[0] == 0 || | |
3048 | ab->b_cksum0 == bp->blk_cksum.zc_word[0]); | |
3049 | if (ab->b_state != arc_anon) | |
3050 | arc_change_state(arc_anon, ab, hash_lock); | |
3051 | if (HDR_IO_IN_PROGRESS(ab)) { | |
3052 | /* | |
3053 | * This should only happen when we prefetch. | |
3054 | */ | |
3055 | ASSERT(ab->b_flags & ARC_PREFETCH); | |
3056 | ASSERT3U(ab->b_datacnt, ==, 1); | |
3057 | ab->b_flags |= ARC_FREED_IN_READ; | |
3058 | if (HDR_IN_HASH_TABLE(ab)) | |
3059 | buf_hash_remove(ab); | |
3060 | ab->b_arc_access = 0; | |
3061 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3062 | ab->b_birth = 0; | |
3063 | ab->b_cksum0 = 0; | |
3064 | ab->b_buf->b_efunc = NULL; | |
3065 | ab->b_buf->b_private = NULL; | |
3066 | mutex_exit(hash_lock); | |
3067 | } else if (refcount_is_zero(&ab->b_refcnt)) { | |
3068 | ab->b_flags |= ARC_FREE_IN_PROGRESS; | |
3069 | mutex_exit(hash_lock); | |
3070 | arc_hdr_destroy(ab); | |
3071 | ARCSTAT_BUMP(arcstat_deleted); | |
3072 | } else { | |
3073 | /* | |
3074 | * We still have an active reference on this | |
3075 | * buffer. This can happen, e.g., from | |
3076 | * dbuf_unoverride(). | |
3077 | */ | |
3078 | ASSERT(!HDR_IN_HASH_TABLE(ab)); | |
3079 | ab->b_arc_access = 0; | |
3080 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3081 | ab->b_birth = 0; | |
3082 | ab->b_cksum0 = 0; | |
3083 | ab->b_buf->b_efunc = NULL; | |
3084 | ab->b_buf->b_private = NULL; | |
3085 | mutex_exit(hash_lock); | |
3086 | } | |
3087 | } | |
3088 | ||
3089 | zio = zio_free(pio, spa, txg, bp, done, private); | |
3090 | ||
3091 | if (arc_flags & ARC_WAIT) | |
3092 | return (zio_wait(zio)); | |
3093 | ||
3094 | ASSERT(arc_flags & ARC_NOWAIT); | |
3095 | zio_nowait(zio); | |
3096 | ||
3097 | return (0); | |
3098 | } | |
3099 | ||
3100 | static int | |
3101 | arc_memory_throttle(uint64_t reserve, uint64_t txg) | |
3102 | { | |
3103 | #ifdef _KERNEL | |
3104 | uint64_t inflight_data = arc_anon->arcs_size; | |
3105 | uint64_t available_memory = ptob(freemem); | |
3106 | static uint64_t page_load = 0; | |
3107 | static uint64_t last_txg = 0; | |
3108 | ||
3109 | #if defined(__i386) | |
3110 | available_memory = | |
3111 | MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); | |
3112 | #endif | |
3113 | if (available_memory >= zfs_write_limit_max) | |
3114 | return (0); | |
3115 | ||
3116 | if (txg > last_txg) { | |
3117 | last_txg = txg; | |
3118 | page_load = 0; | |
3119 | } | |
3120 | /* | |
3121 | * If we are in pageout, memory is already tight and the ARC | |
3122 | * will already be evicting, so we just want to let page | |
3123 | * writes continue as quickly as possible. | |
3124 | */ | |
3125 | if (curproc == proc_pageout) { | |
3126 | if (page_load > MAX(ptob(minfree), available_memory) / 4) | |
3127 | return (ERESTART); | |
3128 | /* Note: reserve is inflated, so we deflate */ | |
3129 | page_load += reserve / 8; | |
3130 | return (0); | |
3131 | } else if (page_load > 0 && arc_reclaim_needed()) { | |
3132 | /* memory is low, delay before restarting */ | |
3133 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3134 | return (EAGAIN); | |
3135 | } | |
3136 | page_load = 0; | |
3137 | ||
3138 | if (arc_size > arc_c_min) { | |
3139 | uint64_t evictable_memory = | |
3140 | arc_mru->arcs_lsize[ARC_BUFC_DATA] + | |
3141 | arc_mru->arcs_lsize[ARC_BUFC_METADATA] + | |
3142 | arc_mfu->arcs_lsize[ARC_BUFC_DATA] + | |
3143 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; | |
3144 | available_memory += MIN(evictable_memory, arc_size - arc_c_min); | |
3145 | } | |
3146 | ||
3147 | if (inflight_data > available_memory / 4) { | |
3148 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3149 | return (ERESTART); | |
3150 | } | |
3151 | #endif | |
3152 | return (0); | |
3153 | } | |
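/*
 * The decision tree above, isolated into a standalone sketch with the
 * kernel globals passed as explicit parameters; the names are
 * illustrative.  Deflating the inflated reserve by 8 and capping the
 * pageout path at a quarter of the larger of minfree and available
 * memory both mirror arc_memory_throttle() directly.
 */
static int
example_throttle(uint64_t reserve, int in_pageout, uint64_t *page_load,
    uint64_t minfree_bytes, uint64_t available_memory, int reclaim_needed)
{
	uint64_t cap = (minfree_bytes > available_memory ?
	    minfree_bytes : available_memory) / 4;

	if (in_pageout) {
		if (*page_load > cap)
			return (ERESTART);	/* back off, restart */
		*page_load += reserve / 8;	/* reserve is inflated */
		return (0);
	}
	if (*page_load > 0 && reclaim_needed)
		return (EAGAIN);		/* memory low: delay */
	*page_load = 0;				/* pressure has passed */
	return (0);
}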
3154 | ||
3155 | void | |
3156 | arc_tempreserve_clear(uint64_t reserve) | |
3157 | { | |
3158 | atomic_add_64(&arc_tempreserve, -reserve); | |
3159 | ASSERT((int64_t)arc_tempreserve >= 0); | |
3160 | } | |
3161 | ||
3162 | int | |
3163 | arc_tempreserve_space(uint64_t reserve, uint64_t txg) | |
3164 | { | |
3165 | int error; | |
3166 | ||
3167 | #ifdef ZFS_DEBUG | |
3168 | /* | |
3169 | * Once in a while, fail for no reason. Everything should cope. | |
3170 | */ | |
3171 | if (spa_get_random(10000) == 0) { | |
3172 | dprintf("forcing random failure\n"); | |
3173 | return (ERESTART); | |
3174 | } | |
3175 | #endif | |
3176 | if (reserve > arc_c/4 && !arc_no_grow) | |
3177 | arc_c = MIN(arc_c_max, reserve * 4); | |
3178 | if (reserve > arc_c) | |
3179 | return (ENOMEM); | |
3180 | ||
3181 | /* | |
3182 | * Writes will, almost always, require additional memory allocations | |
3183 | * in order to compress/encrypt/etc. the data. We therefore need to | |
3184 | * make sure that there is sufficient available memory for this. | |
3185 | */ | |
3186 | if ((error = arc_memory_throttle(reserve, txg)) != 0) | |
3187 | return (error); | |
3188 | ||
3189 | /* | |
3190 | * Throttle writes when the amount of dirty data in the cache | |
3191 | * gets too large. We try to keep the cache less than half full | |
3192 | * of dirty blocks so that our sync times don't grow too large. | |
3193 | * Note: if two requests come in concurrently, we might let them | |
3194 | * both succeed, when one of them should fail. Not a huge deal. | |
3195 | */ | |
3196 | if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && | |
3197 | arc_anon->arcs_size > arc_c / 4) { | |
3198 | dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " | |
3199 | "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", | |
3200 | arc_tempreserve>>10, | |
3201 | arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, | |
3202 | arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, | |
3203 | reserve>>10, arc_c>>10); | |
3204 | return (ERESTART); | |
3205 | } | |
3206 | atomic_add_64(&arc_tempreserve, reserve); | |
3207 | return (0); | |
3208 | } | |
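/*
 * A worked example of the dirty-data throttle above, using made-up
 * numbers: with arc_c = 1 GB, a write is refused only when anonymous
 * (dirty) data alone exceeds arc_c/4 = 256 MB *and* the sum
 * reserve + arc_tempreserve + anon exceeds arc_c/2 = 512 MB.  The
 * predicate, isolated as a sketch:
 */
static int
example_would_throttle(uint64_t reserve, uint64_t tempreserve,
    uint64_t anon_size, uint64_t c)
{
	return (reserve + tempreserve + anon_size > c / 2 &&
	    anon_size > c / 4);
}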
3209 | ||
3210 | void | |
3211 | arc_init(void) | |
3212 | { | |
3213 | mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
3214 | cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); | |
3215 | ||
3216 | /* Convert seconds to clock ticks */ | |
3217 | arc_min_prefetch_lifespan = 1 * hz; | |
3218 | ||
3219 | /* Start out with 1/8 of all memory */ | |
3220 | arc_c = physmem * PAGESIZE / 8; | |
3221 | ||
3222 | #ifdef _KERNEL | |
3223 | /* | |
3224 | * On architectures where the physical memory can be larger | |
3225 | * than the addressable space (Intel in 32-bit mode), we may | |
3226 | * need to limit the cache to 1/8 of VM size. | |
3227 | */ | |
3228 | arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); | |
3229 | #endif | |
3230 | ||
3231 | /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ | |
3232 | arc_c_min = MAX(arc_c / 4, 64<<20); | |
3233 | /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ | |
3234 | if (arc_c * 8 >= 1<<30) | |
3235 | arc_c_max = (arc_c * 8) - (1<<30); | |
3236 | else | |
3237 | arc_c_max = arc_c_min; | |
3238 | arc_c_max = MAX(arc_c * 6, arc_c_max); | |
3239 | ||
3240 | /* | |
3241 | * Allow the tunables to override our calculations if they are | |
3242 | * reasonable (i.e. over 64MB) | |
3243 | */ | |
3244 | if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) | |
3245 | arc_c_max = zfs_arc_max; | |
3246 | if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) | |
3247 | arc_c_min = zfs_arc_min; | |
3248 | ||
3249 | arc_c = arc_c_max; | |
3250 | arc_p = (arc_c >> 1); | |
3251 | ||
3252 | /* limit meta-data to 1/4 of the arc capacity */ | |
3253 | arc_meta_limit = arc_c_max / 4; | |
3254 | ||
3255 | /* Allow the tunable to override if it is reasonable */ | |
3256 | if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) | |
3257 | arc_meta_limit = zfs_arc_meta_limit; | |
3258 | ||
3259 | if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) | |
3260 | arc_c_min = arc_meta_limit / 2; | |
3261 | ||
3262 | /* if kmem_flags are set, let's try to use less memory */ | |
3263 | if (kmem_debugging()) | |
3264 | arc_c = arc_c / 2; | |
3265 | if (arc_c < arc_c_min) | |
3266 | arc_c = arc_c_min; | |
3267 | ||
3268 | arc_anon = &ARC_anon; | |
3269 | arc_mru = &ARC_mru; | |
3270 | arc_mru_ghost = &ARC_mru_ghost; | |
3271 | arc_mfu = &ARC_mfu; | |
3272 | arc_mfu_ghost = &ARC_mfu_ghost; | |
3273 | arc_l2c_only = &ARC_l2c_only; | |
3274 | arc_size = 0; | |
3275 | ||
3276 | mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3277 | mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3278 | mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3279 | mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3280 | mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3281 | mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3282 | ||
3283 | list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], | |
3284 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3285 | list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], | |
3286 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3287 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], | |
3288 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3289 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], | |
3290 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3291 | list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], | |
3292 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3293 | list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], | |
3294 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3295 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], | |
3296 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3297 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], | |
3298 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3299 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], | |
3300 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3301 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], | |
3302 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3303 | ||
3304 | buf_init(); | |
3305 | ||
3306 | arc_thread_exit = 0; | |
3307 | arc_eviction_list = NULL; | |
3308 | mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3309 | bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); | |
3310 | ||
3311 | arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, | |
3312 | sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); | |
3313 | ||
3314 | if (arc_ksp != NULL) { | |
3315 | arc_ksp->ks_data = &arc_stats; | |
3316 | kstat_install(arc_ksp); | |
3317 | } | |
3318 | ||
3319 | (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, | |
3320 | TS_RUN, minclsyspri); | |
3321 | ||
3322 | arc_dead = FALSE; | |
3323 | ||
3324 | if (zfs_write_limit_max == 0) | |
3325 | zfs_write_limit_max = physmem * PAGESIZE >> | |
3326 | zfs_write_limit_shift; | |
3327 | else | |
3328 | zfs_write_limit_shift = 0; | |
3329 | } | |
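/*
 * A worked example of the sizing above, following the statements in
 * code order, for a hypothetical machine with 4 GB of physical
 * memory, no tunables set, and no kmem debugging:
 *
 *	arc_c          = 4 GB / 8			= 512 MB
 *	arc_c_min      = MAX(512 MB / 4, 64 MB)		= 128 MB
 *	arc_c_max      = MAX(512 MB * 6,
 *	                 (512 MB * 8) - 1 GB)		= 3 GB
 *	arc_c          = arc_c_max			= 3 GB
 *	arc_p          = arc_c / 2			= 1.5 GB
 *	arc_meta_limit = arc_c_max / 4			= 768 MB
 *	arc_c_min      = MAX(128 MB, 768 MB / 2)	= 384 MB
 */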
3330 | ||
3331 | void | |
3332 | arc_fini(void) | |
3333 | { | |
3334 | mutex_enter(&arc_reclaim_thr_lock); | |
3335 | arc_thread_exit = 1; | |
3336 | while (arc_thread_exit != 0) | |
3337 | cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); | |
3338 | mutex_exit(&arc_reclaim_thr_lock); | |
3339 | ||
3340 | arc_flush(NULL); | |
3341 | ||
3342 | arc_dead = TRUE; | |
3343 | ||
3344 | if (arc_ksp != NULL) { | |
3345 | kstat_delete(arc_ksp); | |
3346 | arc_ksp = NULL; | |
3347 | } | |
3348 | ||
3349 | mutex_destroy(&arc_eviction_mtx); | |
3350 | mutex_destroy(&arc_reclaim_thr_lock); | |
3351 | cv_destroy(&arc_reclaim_thr_cv); | |
3352 | ||
3353 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); | |
3354 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3355 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); | |
3356 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3357 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); | |
3358 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); | |
3359 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); | |
3360 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); | |
/* destroy the l2c_only lists created in arc_init() */
list_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
list_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
3361 | ||
3362 | mutex_destroy(&arc_anon->arcs_mtx); | |
3363 | mutex_destroy(&arc_mru->arcs_mtx); | |
3364 | mutex_destroy(&arc_mru_ghost->arcs_mtx); | |
3365 | mutex_destroy(&arc_mfu->arcs_mtx); | |
3366 | mutex_destroy(&arc_mfu_ghost->arcs_mtx); | |
mutex_destroy(&arc_l2c_only->arcs_mtx);
3367 | ||
3368 | buf_fini(); | |
3369 | } | |
3370 | ||
3371 | /* | |
3372 | * Level 2 ARC | |
3373 | * | |
3374 | * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. | |
3375 | * It uses dedicated storage devices to hold cached data, which are populated | |
3376 | * using large infrequent writes. The main role of this cache is to boost | |
3377 | * the performance of random read workloads. The intended L2ARC devices | |
3378 | * include short-stroked disks, solid state disks, and other media with | |
3379 | * substantially faster read latency than disk. | |
3380 | * | |
3381 | * +-----------------------+ | |
3382 | * | ARC | | |
3383 | * +-----------------------+ | |
3384 | * | ^ ^ | |
3385 | * | | | | |
3386 | * l2arc_feed_thread() arc_read() | |
3387 | * | | | | |
3388 | * | l2arc read | | |
3389 | * V | | | |
3390 | * +---------------+ | | |
3391 | * | L2ARC | | | |
3392 | * +---------------+ | | |
3393 | * | ^ | | |
3394 | * l2arc_write() | | | |
3395 | * | | | | |
3396 | * V | | | |
3397 | * +-------+ +-------+ | |
3398 | * | vdev | | vdev | | |
3399 | * | cache | | cache | | |
3400 | * +-------+ +-------+ | |
3401 | * +=========+ .-----. | |
3402 | * : L2ARC : |-_____-| | |
3403 | * : devices : | Disks | | |
3404 | * +=========+ `-_____-' | |
3405 | * | |
3406 | * Read requests are satisfied from the following sources, in order: | |
3407 | * | |
3408 | * 1) ARC | |
3409 | * 2) vdev cache of L2ARC devices | |
3410 | * 3) L2ARC devices | |
3411 | * 4) vdev cache of disks | |
3412 | * 5) disks | |
3413 | * | |
3414 | * Some L2ARC device types exhibit extremely slow write performance. | |
3415 | * To accommodate this, there are some significant differences between | |
3416 | * the L2ARC and traditional cache design: | |
3417 | * | |
3418 | * 1. There is no eviction path from the ARC to the L2ARC. Evictions from | |
3419 | * the ARC behave as usual, freeing buffers and placing headers on ghost | |
3420 | * lists. The ARC does not send buffers to the L2ARC during eviction as | |
3421 | * this would add inflated write latencies during any ARC memory pressure. | |
3422 | * | |
3423 | * 2. The L2ARC attempts to cache data from the ARC before it is evicted. | |
3424 | * It does this by periodically scanning buffers from the eviction-end of | |
3425 | * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are | |
3426 | * not already there. It scans until a headroom of buffers is satisfied, | |
3427 | * which itself acts as a buffer against ARC eviction. The thread that does | |
3428 | * this is l2arc_feed_thread(), illustrated below; example sizes are included | |
3429 | * to give a better sense of scale than the diagram alone: | |
3430 | * | |
3431 | * head --> tail | |
3432 | * +---------------------+----------+ | |
3433 | * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC | |
3434 | * +---------------------+----------+ | o L2ARC eligible | |
3435 | * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer | |
3436 | * +---------------------+----------+ | | |
3437 | * 15.9 Gbytes ^ 32 Mbytes | | |
3438 | * headroom | | |
3439 | * l2arc_feed_thread() | |
3440 | * | | |
3441 | * l2arc write hand <--[oooo]--' | |
3442 | * | 8 Mbyte | |
3443 | * | write max | |
3444 | * V | |
3445 | * +==============================+ | |
3446 | * L2ARC dev |####|#|###|###| |####| ... | | |
3447 | * +==============================+ | |
3448 | * 32 Gbytes | |
3449 | * | |
3450 | * 3. If an ARC buffer is copied to the L2ARC but then hit instead of | |
3451 | * evicted, then the L2ARC has cached a buffer much sooner than it probably | |
3452 | * needed to, potentially wasting L2ARC device bandwidth and storage. It is | |
3453 | * safe to say that this is an uncommon case, since buffers at the end of | |
3454 | * the ARC lists have moved there due to inactivity. | |
3455 | * | |
3456 | * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, | |
3457 | * then the L2ARC simply misses copying some buffers. This serves as a | |
3458 | * pressure valve to prevent heavy read workloads from both stalling the ARC | |
3459 | * with waits and clogging the L2ARC with writes. This also helps prevent | |
3460 | * the potential for the L2ARC to churn if it attempts to cache content too | |
3461 | * quickly, such as during backups of the entire pool. | |
3462 | * | |
3463 | * 5. Writes to the L2ARC devices are grouped and sent in-sequence, so that | |
3464 | * the vdev queue can aggregate them into larger and fewer writes. Each | |
3465 | * device is written to in a rotor fashion, sweeping writes through | |
3466 | * available space then repeating. | |
3467 | * | |
3468 | * 6. The L2ARC does not store dirty content. It never needs to flush | |
3469 | * write buffers back to disk-based storage. | |
3470 | * | |
3471 | * 7. If an ARC buffer is written (and dirtied) which also exists in the | |
3472 | * L2ARC, the now stale L2ARC buffer is immediately dropped. | |
3473 | * | |
3474 | * The performance of the L2ARC can be tweaked by a number of tunables, which | |
3475 | * may be necessary for different workloads: | |
3476 | * | |
3477 | * l2arc_write_max max write bytes per interval | |
3478 | * l2arc_noprefetch skip caching prefetched buffers | |
3479 | * l2arc_headroom number of max device writes to precache | |
3480 | * l2arc_feed_secs seconds between L2ARC writing | |
3481 | * | |
3482 | * Tunables may be removed or added as future performance improvements are | |
3483 | * integrated, and also may become zpool properties. | |
3484 | */ | |
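/*
 * Tying the tunables to the diagram's example sizes: the headroom
 * scanned on each pass is l2arc_write_max multiplied by
 * l2arc_headroom (see l2arc_write_buffers() below), so the 8 Mbyte
 * write max and 32 Mbyte headroom shown above correspond to
 * l2arc_headroom = 4.  Those figures are the diagram's examples, not
 * mandated defaults.
 */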
3485 | ||
3486 | static void | |
3487 | l2arc_hdr_stat_add(void) | |
3488 | { | |
3489 | ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); | |
3490 | ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); | |
3491 | } | |
3492 | ||
3493 | static void | |
3494 | l2arc_hdr_stat_remove(void) | |
3495 | { | |
3496 | ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); | |
3497 | ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); | |
3498 | } | |
3499 | ||
3500 | /* | |
3501 | * Cycle through L2ARC devices. This is how the L2ARC load-balances. | |
3502 | * This is called with l2arc_dev_mtx held, which also locks out spa removal. | |
3503 | */ | |
3504 | static l2arc_dev_t * | |
3505 | l2arc_dev_get_next(void) | |
3506 | { | |
3507 | l2arc_dev_t *next; | |
3508 | ||
3509 | if (l2arc_dev_last == NULL) { | |
3510 | next = list_head(l2arc_dev_list); | |
3511 | } else { | |
3512 | next = list_next(l2arc_dev_list, l2arc_dev_last); | |
3513 | if (next == NULL) | |
3514 | next = list_head(l2arc_dev_list); | |
3515 | } | |
3516 | ||
3517 | l2arc_dev_last = next; | |
3518 | ||
3519 | return (next); | |
3520 | } | |
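/*
 * The same rotor reduced to an index over a plain array rather than a
 * kernel list_t; the names are illustrative.  A last-index of -1
 * plays the role of l2arc_dev_last == NULL, so the first call hands
 * out device 0.
 */
static int
example_rotor_next(int ndev, int *last)
{
	if (ndev == 0)
		return (-1);		/* no cache devices */
	*last = (*last + 1) % ndev;	/* advance, wrapping to start */
	return (*last);
}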
3521 | ||
3522 | /* | |
3523 | * A write to a cache device has completed. Update all headers to allow | |
3524 | * reads from these buffers to begin. | |
3525 | */ | |
3526 | static void | |
3527 | l2arc_write_done(zio_t *zio) | |
3528 | { | |
3529 | l2arc_write_callback_t *cb; | |
3530 | l2arc_dev_t *dev; | |
3531 | list_t *buflist; | |
3532 | l2arc_data_free_t *df, *df_prev; | |
3533 | arc_buf_hdr_t *head, *ab, *ab_prev; | |
3534 | kmutex_t *hash_lock; | |
3535 | ||
3536 | cb = zio->io_private; | |
3537 | ASSERT(cb != NULL); | |
3538 | dev = cb->l2wcb_dev; | |
3539 | ASSERT(dev != NULL); | |
3540 | head = cb->l2wcb_head; | |
3541 | ASSERT(head != NULL); | |
3542 | buflist = dev->l2ad_buflist; | |
3543 | ASSERT(buflist != NULL); | |
3544 | DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, | |
3545 | l2arc_write_callback_t *, cb); | |
3546 | ||
3547 | if (zio->io_error != 0) | |
3548 | ARCSTAT_BUMP(arcstat_l2_writes_error); | |
3549 | ||
3550 | mutex_enter(&l2arc_buflist_mtx); | |
3551 | ||
3552 | /* | |
3553 | * All writes completed, or an error was hit. | |
3554 | */ | |
3555 | for (ab = list_prev(buflist, head); ab; ab = ab_prev) { | |
3556 | ab_prev = list_prev(buflist, ab); | |
3557 | ||
3558 | hash_lock = HDR_LOCK(ab); | |
3559 | if (!mutex_tryenter(hash_lock)) { | |
3560 | /* | |
3561 | * This buffer misses out. It may be in a stage | |
3562 | * of eviction. Its ARC_L2_WRITING flag will be | |
3563 | * left set, denying reads to this buffer. | |
3564 | */ | |
3565 | ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); | |
3566 | continue; | |
3567 | } | |
3568 | ||
3569 | if (zio->io_error != 0) { | |
3570 | /* | |
3571 | * Error - invalidate L2ARC entry. | |
3572 | */ | |
3573 | ab->b_l2hdr = NULL; | |
3574 | } | |
3575 | ||
3576 | /* | |
3577 | * Allow ARC to begin reads to this L2ARC entry. | |
3578 | */ | |
3579 | ab->b_flags &= ~ARC_L2_WRITING; | |
3580 | ||
3581 | mutex_exit(hash_lock); | |
3582 | } | |
3583 | ||
3584 | atomic_inc_64(&l2arc_writes_done); | |
3585 | list_remove(buflist, head); | |
3586 | kmem_cache_free(hdr_cache, head); | |
3587 | mutex_exit(&l2arc_buflist_mtx); | |
3588 | ||
3589 | /* | |
3590 | * Free buffers that were tagged for destruction. | |
3591 | */ | |
3592 | mutex_enter(&l2arc_free_on_write_mtx); | |
3593 | buflist = l2arc_free_on_write; | |
3594 | for (df = list_tail(buflist); df; df = df_prev) { | |
3595 | df_prev = list_prev(buflist, df); | |
3596 | ASSERT(df->l2df_data != NULL); | |
3597 | ASSERT(df->l2df_func != NULL); | |
3598 | df->l2df_func(df->l2df_data, df->l2df_size); | |
3599 | list_remove(buflist, df); | |
3600 | kmem_free(df, sizeof (l2arc_data_free_t)); | |
3601 | } | |
3602 | mutex_exit(&l2arc_free_on_write_mtx); | |
3603 | ||
3604 | kmem_free(cb, sizeof (l2arc_write_callback_t)); | |
3605 | } | |
3606 | ||
3607 | /* | |
3608 | * A read to a cache device has completed. Validate buffer contents before | |
3609 | * handing over to the regular ARC routines. | |
3610 | */ | |
3611 | static void | |
3612 | l2arc_read_done(zio_t *zio) | |
3613 | { | |
3614 | l2arc_read_callback_t *cb; | |
3615 | arc_buf_hdr_t *hdr; | |
3616 | arc_buf_t *buf; | |
3617 | zio_t *rzio; | |
3618 | kmutex_t *hash_lock; | |
3619 | int equal, err = 0; | |
3620 | ||
3621 | cb = zio->io_private; | |
3622 | ASSERT(cb != NULL); | |
3623 | buf = cb->l2rcb_buf; | |
3624 | ASSERT(buf != NULL); | |
3625 | hdr = buf->b_hdr; | |
3626 | ASSERT(hdr != NULL); | |
3627 | ||
3628 | hash_lock = HDR_LOCK(hdr); | |
3629 | mutex_enter(hash_lock); | |
3630 | ||
3631 | /* | |
3632 | * Check this survived the L2ARC journey. | |
3633 | */ | |
3634 | equal = arc_cksum_equal(buf); | |
3635 | if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { | |
3636 | mutex_exit(hash_lock); | |
3637 | zio->io_private = buf; | |
3638 | arc_read_done(zio); | |
3639 | } else { | |
3640 | mutex_exit(hash_lock); | |
3641 | /* | |
3642 | * Buffer didn't survive caching. Increment stats and | |
3643 | * reissue to the original storage device. | |
3644 | */ | |
3645 | if (zio->io_error != 0) | |
3646 | ARCSTAT_BUMP(arcstat_l2_io_error); | |
3647 | if (!equal) | |
3648 | ARCSTAT_BUMP(arcstat_l2_cksum_bad); | |
3649 | ||
3650 | zio->io_flags &= ~ZIO_FLAG_DONT_CACHE; | |
3651 | rzio = zio_read(NULL, cb->l2rcb_spa, &cb->l2rcb_bp, | |
3652 | buf->b_data, zio->io_size, arc_read_done, buf, | |
3653 | zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb); | |
3654 | ||
3655 | /* | |
3656 | * Since this is a separate thread, we can wait on this | |
3657 | * I/O whether there is an io_waiter or not. | |
3658 | */ | |
3659 | err = zio_wait(rzio); | |
3660 | ||
3661 | /* | |
3662 | * Let the resent I/O call arc_read_done() instead. | |
3663 | * io_error is set to the reissued I/O error status. | |
3664 | */ | |
3665 | zio->io_done = NULL; | |
3666 | zio->io_waiter = NULL; | |
3667 | zio->io_error = err; | |
3668 | } | |
3669 | ||
3670 | kmem_free(cb, sizeof (l2arc_read_callback_t)); | |
3671 | } | |
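/*
 * The verify-then-fallback shape of l2arc_read_done(), reduced to a
 * sketch: the cached copy is trusted only if both the I/O and the
 * checksum agree; otherwise the read is reissued against the
 * authoritative copy.  fetch_from_origin is a placeholder callback,
 * not an ARC function.
 */
static int
example_read_with_fallback(int cache_io_error, int cksum_ok,
    int (*fetch_from_origin)(void *), void *arg)
{
	if (cache_io_error == 0 && cksum_ok)
		return (0);	/* cache copy verified good */
	/* cache copy unusable: fall back to the original device */
	return (fetch_from_origin(arg));
}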
3672 | ||
3673 | /* | |
3674 | * This is the list priority from which the L2ARC will search for pages to | |
3675 | * cache. This is used within loops (0..3) to cycle through lists in the | |
3676 | * desired order. This order can have a significant effect on cache | |
3677 | * performance. | |
3678 | * | |
3679 | * Currently the metadata lists are hit first, MFU then MRU, followed by | |
3680 | * the data lists. This function returns the selected list with its lock | |
3681 | * held, passing the lock pointer back through the 'lock' argument. | |
3682 | */ | |
3683 | static list_t * | |
3684 | l2arc_list_locked(int list_num, kmutex_t **lock) | |
3685 | { | |
3686 | list_t *list; | |
3687 | ||
3688 | ASSERT(list_num >= 0 && list_num <= 3); | |
3689 | ||
3690 | switch (list_num) { | |
3691 | case 0: | |
3692 | list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; | |
3693 | *lock = &arc_mfu->arcs_mtx; | |
3694 | break; | |
3695 | case 1: | |
3696 | list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; | |
3697 | *lock = &arc_mru->arcs_mtx; | |
3698 | break; | |
3699 | case 2: | |
3700 | list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; | |
3701 | *lock = &arc_mfu->arcs_mtx; | |
3702 | break; | |
3703 | case 3: | |
3704 | list = &arc_mru->arcs_list[ARC_BUFC_DATA]; | |
3705 | *lock = &arc_mru->arcs_mtx; | |
3706 | break; | |
3707 | } | |
3708 | ||
3709 | ASSERT(!(MUTEX_HELD(*lock))); | |
3710 | mutex_enter(*lock); | |
3711 | return (list); | |
3712 | } | |
3713 | ||
3714 | /* | |
3715 | * Evict buffers from the device write hand to the distance specified in | |
3716 | * bytes. This distance may span populated buffers, or it may span nothing; | |
3717 | * it clears a region on the L2ARC device ready for writing. | |
3718 | * If the 'all' boolean is set, every buffer is evicted. | |
3719 | */ | |
3720 | static void | |
3721 | l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) | |
3722 | { | |
3723 | list_t *buflist; | |
3724 | l2arc_buf_hdr_t *abl2; | |
3725 | arc_buf_hdr_t *ab, *ab_prev; | |
3726 | kmutex_t *hash_lock; | |
3727 | uint64_t taddr; | |
3728 | ||
3729 | ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); | |
3730 | ||
3731 | buflist = dev->l2ad_buflist; | |
3732 | ||
3733 | if (buflist == NULL) | |
3734 | return; | |
3735 | ||
3736 | if (!all && dev->l2ad_first) { | |
3737 | /* | |
3738 | * This is the first sweep through the device. There is | |
3739 | * nothing to evict. | |
3740 | */ | |
3741 | return; | |
3742 | } | |
3743 | ||
3744 | if (dev->l2ad_hand >= (dev->l2ad_end - (2 * dev->l2ad_write))) { | |
3745 | /* | |
3746 | * When nearing the end of the device, evict to the end | |
3747 | * before the device write hand jumps to the start. | |
3748 | */ | |
3749 | taddr = dev->l2ad_end; | |
3750 | } else { | |
3751 | taddr = dev->l2ad_hand + distance; | |
3752 | } | |
3753 | DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, | |
3754 | uint64_t, taddr, boolean_t, all); | |
3755 | ||
3756 | top: | |
3757 | mutex_enter(&l2arc_buflist_mtx); | |
3758 | for (ab = list_tail(buflist); ab; ab = ab_prev) { | |
3759 | ab_prev = list_prev(buflist, ab); | |
3760 | ||
3761 | hash_lock = HDR_LOCK(ab); | |
3762 | if (!mutex_tryenter(hash_lock)) { | |
3763 | /* | |
3764 | * Missed the hash lock. Retry. | |
3765 | */ | |
3766 | ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); | |
3767 | mutex_exit(&l2arc_buflist_mtx); | |
3768 | mutex_enter(hash_lock); | |
3769 | mutex_exit(hash_lock); | |
3770 | goto top; | |
3771 | } | |
3772 | ||
3773 | if (HDR_L2_WRITE_HEAD(ab)) { | |
3774 | /* | |
3775 | * We hit a write head node. Leave it for | |
3776 | * l2arc_write_done(). | |
3777 | */ | |
3778 | list_remove(buflist, ab); | |
3779 | mutex_exit(hash_lock); | |
3780 | continue; | |
3781 | } | |
3782 | ||
3783 | if (!all && ab->b_l2hdr != NULL && | |
3784 | (ab->b_l2hdr->b_daddr > taddr || | |
3785 | ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { | |
3786 | /* | |
3787 | * We've evicted to the target address, | |
3788 | * or the end of the device. | |
3789 | */ | |
3790 | mutex_exit(hash_lock); | |
3791 | break; | |
3792 | } | |
3793 | ||
3794 | if (HDR_FREE_IN_PROGRESS(ab)) { | |
3795 | /* | |
3796 | * Already on the path to destruction. | |
3797 | */ | |
3798 | mutex_exit(hash_lock); | |
3799 | continue; | |
3800 | } | |
3801 | ||
3802 | if (ab->b_state == arc_l2c_only) { | |
3803 | ASSERT(!HDR_L2_READING(ab)); | |
3804 | /* | |
3805 | * This doesn't exist in the ARC. Destroy. | |
3806 | * arc_hdr_destroy() will call list_remove() | |
3807 | * and decrement arcstat_l2_size. | |
3808 | */ | |
3809 | arc_change_state(arc_anon, ab, hash_lock); | |
3810 | arc_hdr_destroy(ab); | |
3811 | } else { | |
3812 | /* | |
3813 | * Tell ARC this no longer exists in L2ARC. | |
3814 | */ | |
3815 | if (ab->b_l2hdr != NULL) { | |
3816 | abl2 = ab->b_l2hdr; | |
3817 | ab->b_l2hdr = NULL; | |
3818 | kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); | |
3819 | ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); | |
3820 | } | |
3821 | list_remove(buflist, ab); | |
3822 | ||
3823 | /* | |
3824 | * This may have been leftover after a | |
3825 | * failed write. | |
3826 | */ | |
3827 | ab->b_flags &= ~ARC_L2_WRITING; | |
3828 | ||
3829 | /* | |
3830 | * Invalidate issued or about to be issued | |
3831 | * reads, since we may be about to write | |
3832 | * over this location. | |
3833 | */ | |
3834 | if (HDR_L2_READING(ab)) { | |
3835 | ARCSTAT_BUMP(arcstat_l2_evict_reading); | |
3836 | ab->b_flags |= ARC_L2_EVICTED; | |
3837 | } | |
3838 | } | |
3839 | mutex_exit(hash_lock); | |
3840 | } | |
3841 | mutex_exit(&l2arc_buflist_mtx); | |
3842 | ||
3843 | spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); | |
3844 | dev->l2ad_evict = taddr; | |
3845 | } | |
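/*
 * The target-address computation from the top of l2arc_evict(),
 * isolated: when the write hand is within two write-sizes of the end
 * of the device, evict all the way to the end (the hand is about to
 * wrap); otherwise evict `distance' bytes ahead of the hand.  The
 * parameter names are illustrative.
 */
static uint64_t
example_evict_target(uint64_t hand, uint64_t end, uint64_t write_sz,
    uint64_t distance)
{
	if (hand >= end - (2 * write_sz))
		return (end);		/* near the end: clear to it */
	return (hand + distance);	/* normal case: clear ahead */
}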
3846 | ||
3847 | /* | |
3848 | * Find and write ARC buffers to the L2ARC device. | |
3849 | * | |
3850 | * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid | |
3851 | * for reading until they have completed writing. | |
3852 | */ | |
3853 | static void | |
3854 | l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev) | |
3855 | { | |
3856 | arc_buf_hdr_t *ab, *ab_prev, *head; | |
3857 | l2arc_buf_hdr_t *hdrl2; | |
3858 | list_t *list; | |
3859 | uint64_t passed_sz, write_sz, buf_sz; | |
3860 | uint64_t target_sz = dev->l2ad_write; | |
3861 | uint64_t headroom = dev->l2ad_write * l2arc_headroom; | |
3862 | void *buf_data; | |
3863 | kmutex_t *hash_lock, *list_lock; | |
3864 | boolean_t have_lock, full; | |
3865 | l2arc_write_callback_t *cb; | |
3866 | zio_t *pio, *wzio; | |
3867 | ||
3868 | ASSERT(MUTEX_HELD(&l2arc_dev_mtx)); | |
3869 | ASSERT(dev->l2ad_vdev != NULL); | |
3870 | ||
3871 | pio = NULL; | |
3872 | write_sz = 0; | |
3873 | full = B_FALSE; | |
3874 | head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
3875 | head->b_flags |= ARC_L2_WRITE_HEAD; | |
3876 | ||
3877 | /* | |
3878 | * Copy buffers for L2ARC writing. | |
3879 | */ | |
3880 | mutex_enter(&l2arc_buflist_mtx); | |
3881 | for (int try = 0; try <= 3; try++) { | |
3882 | list = l2arc_list_locked(try, &list_lock); | |
3883 | passed_sz = 0; | |
3884 | ||
3885 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
3886 | ab_prev = list_prev(list, ab); | |
3887 | ||
3888 | hash_lock = HDR_LOCK(ab); | |
3889 | have_lock = MUTEX_HELD(hash_lock); | |
3890 | if (!have_lock && !mutex_tryenter(hash_lock)) { | |
3891 | /* | |
3892 | * Skip this buffer rather than waiting. | |
3893 | */ | |
3894 | continue; | |
3895 | } | |
3896 | ||
3897 | passed_sz += ab->b_size; | |
3898 | if (passed_sz > headroom) { | |
3899 | /* | |
3900 | * Searched too far. | |
3901 | */ | |
3902 | mutex_exit(hash_lock); | |
3903 | break; | |
3904 | } | |
3905 | ||
3906 | if (ab->b_spa != spa) { | |
3907 | mutex_exit(hash_lock); | |
3908 | continue; | |
3909 | } | |
3910 | ||
3911 | if (ab->b_l2hdr != NULL) { | |
3912 | /* | |
3913 | * Already in L2ARC. | |
3914 | */ | |
3915 | mutex_exit(hash_lock); | |
3916 | continue; | |
3917 | } | |
3918 | ||
3919 | if (HDR_IO_IN_PROGRESS(ab) || HDR_DONT_L2CACHE(ab)) { | |
3920 | mutex_exit(hash_lock); | |
3921 | continue; | |
3922 | } | |
3923 | ||
3924 | if ((write_sz + ab->b_size) > target_sz) { | |
3925 | full = B_TRUE; | |
3926 | mutex_exit(hash_lock); | |
3927 | break; | |
3928 | } | |
3929 | ||
3930 | if (ab->b_buf == NULL) { | |
3931 | DTRACE_PROBE1(l2arc__buf__null, void *, ab); | |
3932 | mutex_exit(hash_lock); | |
3933 | continue; | |
3934 | } | |
3935 | ||
3936 | if (pio == NULL) { | |
3937 | /* | |
3938 | * Insert a dummy header on the buflist so | |
3939 | * l2arc_write_done() can find where the | |
3940 | * write buffers begin without searching. | |
3941 | */ | |
3942 | list_insert_head(dev->l2ad_buflist, head); | |
3943 | ||
3944 | cb = kmem_alloc( | |
3945 | sizeof (l2arc_write_callback_t), KM_SLEEP); | |
3946 | cb->l2wcb_dev = dev; | |
3947 | cb->l2wcb_head = head; | |
3948 | pio = zio_root(spa, l2arc_write_done, cb, | |
3949 | ZIO_FLAG_CANFAIL); | |
3950 | } | |
3951 | ||
3952 | /* | |
3953 | * Create and add a new L2ARC header. | |
3954 | */ | |
3955 | hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); | |
3956 | hdrl2->b_dev = dev; | |
3957 | hdrl2->b_daddr = dev->l2ad_hand; | |
3958 | ||
3959 | ab->b_flags |= ARC_L2_WRITING; | |
3960 | ab->b_l2hdr = hdrl2; | |
3961 | list_insert_head(dev->l2ad_buflist, ab); | |
3962 | buf_data = ab->b_buf->b_data; | |
3963 | buf_sz = ab->b_size; | |
3964 | ||
3965 | /* | |
3966 | * Compute and store the buffer cksum before | |
3967 | * writing. In debug builds the cksum is verified first. | |
3968 | */ | |
3969 | arc_cksum_verify(ab->b_buf); | |
3970 | arc_cksum_compute(ab->b_buf, B_TRUE); | |
3971 | ||
3972 | mutex_exit(hash_lock); | |
3973 | ||
3974 | wzio = zio_write_phys(pio, dev->l2ad_vdev, | |
3975 | dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, | |
3976 | NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, | |
3977 | ZIO_FLAG_CANFAIL, B_FALSE); | |
3978 | ||
3979 | DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, | |
3980 | zio_t *, wzio); | |
3981 | (void) zio_nowait(wzio); | |
3982 | ||
3983 | write_sz += buf_sz; | |
3984 | dev->l2ad_hand += buf_sz; | |
3985 | } | |
3986 | ||
3987 | mutex_exit(list_lock); | |
3988 | ||
3989 | if (full == B_TRUE) | |
3990 | break; | |
3991 | } | |
3992 | mutex_exit(&l2arc_buflist_mtx); | |
3993 | ||
3994 | if (pio == NULL) { | |
3995 | ASSERT3U(write_sz, ==, 0); | |
3996 | kmem_cache_free(hdr_cache, head); | |
3997 | return; | |
3998 | } | |
3999 | ||
4000 | ASSERT3U(write_sz, <=, target_sz); | |
4001 | ARCSTAT_BUMP(arcstat_l2_writes_sent); | |
4002 | ARCSTAT_INCR(arcstat_l2_size, write_sz); | |
4003 | spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); | |
4004 | ||
4005 | /* | |
4006 | * Bump device hand to the device start if it is approaching the end. | |
4007 | * l2arc_evict() will already have evicted ahead for this case. | |
4008 | */ | |
4009 | if (dev->l2ad_hand >= (dev->l2ad_end - dev->l2ad_write)) { | |
4010 | spa_l2cache_space_update(dev->l2ad_vdev, 0, | |
4011 | dev->l2ad_end - dev->l2ad_hand); | |
4012 | dev->l2ad_hand = dev->l2ad_start; | |
4013 | dev->l2ad_evict = dev->l2ad_start; | |
4014 | dev->l2ad_first = B_FALSE; | |
4015 | } | |
4016 | ||
4017 | (void) zio_wait(pio); | |
4018 | } | |
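/*
 * The write-hand wrap from the end of the function above, as a
 * standalone sketch: advance the hand by the bytes just written,
 * then jump back to the start of the device once less than one full
 * write remains before the end.  l2arc_evict() has already cleared
 * ahead for this case.
 */
static uint64_t
example_advance_hand(uint64_t hand, uint64_t start, uint64_t end,
    uint64_t write_sz, uint64_t wrote)
{
	hand += wrote;
	if (hand >= end - write_sz)
		hand = start;		/* wrap to device start */
	return (hand);
}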
4019 | ||
4020 | /* | |
4021 | * This thread feeds the L2ARC at regular intervals. This is the beating | |
4022 | * heart of the L2ARC. | |
4023 | */ | |
4024 | static void | |
4025 | l2arc_feed_thread(void) | |
4026 | { | |
4027 | callb_cpr_t cpr; | |
4028 | l2arc_dev_t *dev; | |
4029 | spa_t *spa; | |
4030 | int interval; | |
4031 | boolean_t startup = B_TRUE; | |
4032 | ||
4033 | CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); | |
4034 | ||
4035 | mutex_enter(&l2arc_feed_thr_lock); | |
4036 | ||
4037 | while (l2arc_thread_exit == 0) { | |
4038 | /* | |
4039 | * Initially pause for L2ARC_FEED_DELAY seconds as a grace | |
4040 | * interval during boot, followed by l2arc_feed_secs seconds | |
4041 | * thereafter. | |
4042 | */ | |
4043 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
4044 | if (startup) { | |
4045 | interval = L2ARC_FEED_DELAY; | |
4046 | startup = B_FALSE; | |
4047 | } else { | |
4048 | interval = l2arc_feed_secs; | |
4049 | } | |
4050 | (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, | |
4051 | lbolt + (hz * interval)); | |
4052 | CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); | |
4053 | ||
4054 | /* | |
4055 | * Do nothing until L2ARC devices exist. | |
4056 | */ | |
4057 | mutex_enter(&l2arc_dev_mtx); | |
4058 | if (l2arc_ndev == 0) { | |
4059 | mutex_exit(&l2arc_dev_mtx); | |
4060 | continue; | |
4061 | } | |
4062 | ||
4063 | /* | |
4064 | * Avoid contributing to memory pressure. | |
4065 | */ | |
4066 | if (arc_reclaim_needed()) { | |
4067 | ARCSTAT_BUMP(arcstat_l2_abort_lowmem); | |
4068 | mutex_exit(&l2arc_dev_mtx); | |
4069 | continue; | |
4070 | } | |
4071 | ||
4072 | /* | |
4073 | * This selects the next l2arc device to write to, and in | |
4074 | * doing so the next spa to feed from: dev->l2ad_spa. | |
4075 | */ | |
4076 | if ((dev = l2arc_dev_get_next()) == NULL) { | |
4077 | mutex_exit(&l2arc_dev_mtx); | |
4078 | continue; | |
4079 | } | |
4080 | spa = dev->l2ad_spa; | |
4081 | ASSERT(spa != NULL); | |
4082 | ARCSTAT_BUMP(arcstat_l2_feeds); | |
4083 | ||
4084 | /* | |
4085 | * Evict L2ARC buffers that will be overwritten. | |
4086 | */ | |
4087 | l2arc_evict(dev, dev->l2ad_write, B_FALSE); | |
4088 | ||
4089 | /* | |
4090 | * Write ARC buffers. | |
4091 | */ | |
4092 | l2arc_write_buffers(spa, dev); | |
4093 | mutex_exit(&l2arc_dev_mtx); | |
4094 | } | |
4095 | ||
4096 | l2arc_thread_exit = 0; | |
4097 | cv_broadcast(&l2arc_feed_thr_cv); | |
4098 | CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ | |
4099 | thread_exit(); | |
4100 | } | |
4101 | ||
4102 | /* | |
4103 | * Add a vdev for use by the L2ARC. By this point the spa has already | |
4104 | * validated the vdev and opened it. | |
4105 | */ | |
4106 | void | |
4107 | l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) | |
4108 | { | |
4109 | l2arc_dev_t *adddev; | |
4110 | ||
4111 | /* | |
4112 | * Create a new l2arc device entry. | |
4113 | */ | |
4114 | adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); | |
4115 | adddev->l2ad_spa = spa; | |
4116 | adddev->l2ad_vdev = vd; | |
4117 | adddev->l2ad_write = l2arc_write_max; | |
4118 | adddev->l2ad_start = start; | |
4119 | adddev->l2ad_end = end; | |
4120 | adddev->l2ad_hand = adddev->l2ad_start; | |
4121 | adddev->l2ad_evict = adddev->l2ad_start; | |
4122 | adddev->l2ad_first = B_TRUE; | |
4123 | ASSERT3U(adddev->l2ad_write, >, 0); | |
4124 | ||
4125 | /* | |
4126 | * This is a list of all ARC buffers that are still valid on the | |
4127 | * device. | |
4128 | */ | |
4129 | adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); | |
4130 | list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), | |
4131 | offsetof(arc_buf_hdr_t, b_l2node)); | |
4132 | ||
4133 | spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); | |
4134 | ||
4135 | /* | |
4136 | * Add device to global list | |
4137 | */ | |
4138 | mutex_enter(&l2arc_dev_mtx); | |
4139 | list_insert_head(l2arc_dev_list, adddev); | |
4140 | atomic_inc_64(&l2arc_ndev); | |
4141 | mutex_exit(&l2arc_dev_mtx); | |
4142 | } | |
4143 | ||
4144 | /* | |
4145 | * Remove a vdev from the L2ARC. | |
4146 | */ | |
4147 | void | |
4148 | l2arc_remove_vdev(vdev_t *vd) | |
4149 | { | |
4150 | l2arc_dev_t *dev, *nextdev, *remdev = NULL; | |
4151 | ||
4152 | /* | |
4153 | * We can only grab the spa config lock when cache device writes | |
4154 | * complete. | |
4155 | */ | |
4156 | ASSERT3U(l2arc_writes_sent, ==, l2arc_writes_done); | |
4157 | ||
4158 | /* | |
4159 | * Find the device by vdev | |
4160 | */ | |
4161 | mutex_enter(&l2arc_dev_mtx); | |
4162 | for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { | |
4163 | nextdev = list_next(l2arc_dev_list, dev); | |
4164 | if (vd == dev->l2ad_vdev) { | |
4165 | remdev = dev; | |
4166 | break; | |
4167 | } | |
4168 | } | |
4169 | ASSERT(remdev != NULL); | |
4170 | ||
4171 | /* | |
4172 | * Remove device from global list | |
4173 | */ | |
4174 | list_remove(l2arc_dev_list, remdev); | |
4175 | l2arc_dev_last = NULL; /* may have been invalidated */ | |
4176 | ||
4177 | /* | |
4178 | * Clear all buflists and ARC references; this flushes the L2ARC device. | |
4179 | */ | |
4180 | l2arc_evict(remdev, 0, B_TRUE); | |
4181 | list_destroy(remdev->l2ad_buflist); | |
4182 | kmem_free(remdev->l2ad_buflist, sizeof (list_t)); | |
4183 | kmem_free(remdev, sizeof (l2arc_dev_t)); | |
4184 | ||
4185 | atomic_dec_64(&l2arc_ndev); | |
4186 | mutex_exit(&l2arc_dev_mtx); | |
4187 | } | |
4188 | ||
4189 | void | |
4190 | l2arc_init(void) | |
4191 | { | |
4192 | l2arc_thread_exit = 0; | |
4193 | l2arc_ndev = 0; | |
4194 | l2arc_writes_sent = 0; | |
4195 | l2arc_writes_done = 0; | |
4196 | ||
4197 | mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
4198 | cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); | |
4199 | mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4200 | mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4201 | mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4202 | ||
4203 | l2arc_dev_list = &L2ARC_dev_list; | |
4204 | l2arc_free_on_write = &L2ARC_free_on_write; | |
4205 | list_create(l2arc_dev_list, sizeof (l2arc_dev_t), | |
4206 | offsetof(l2arc_dev_t, l2ad_node)); | |
4207 | list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), | |
4208 | offsetof(l2arc_data_free_t, l2df_list_node)); | |
4209 | ||
4210 | (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, | |
4211 | TS_RUN, minclsyspri); | |
4212 | } | |
4213 | ||
4214 | void | |
4215 | l2arc_fini(void) | |
4216 | { | |
4217 | mutex_enter(&l2arc_feed_thr_lock); | |
4218 | cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ | |
4219 | l2arc_thread_exit = 1; | |
4220 | while (l2arc_thread_exit != 0) | |
4221 | cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); | |
4222 | mutex_exit(&l2arc_feed_thr_lock); | |
4223 | ||
4224 | mutex_destroy(&l2arc_feed_thr_lock); | |
4225 | cv_destroy(&l2arc_feed_thr_cv); | |
4226 | mutex_destroy(&l2arc_dev_mtx); | |
4227 | mutex_destroy(&l2arc_buflist_mtx); | |
4228 | mutex_destroy(&l2arc_free_on_write_mtx); | |
4229 | ||
4230 | list_destroy(l2arc_dev_list); | |
4231 | list_destroy(l2arc_free_on_write); | |
4232 | } |