/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
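
/*
 * Illustrative sketch (not one of the code paths below) of the
 * lock-ordering rule above: while an arc list lock is held, a hash
 * table lock may only be acquired with mutex_tryenter(), and the
 * buffer must be skipped if the attempt fails.  The names "state"
 * and "ab" are placeholders.
 *
 *	mutex_enter(&state->arcs_mtx);		<- arc list lock first
 *	hash_lock = HDR_LOCK(ab);
 *	if (mutex_tryenter(hash_lock)) {	<- never mutex_enter() here
 *		...operate on ab...
 *		mutex_exit(hash_lock);
 *	} else {
 *		...skip ab to avoid deadlock...
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */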

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
	kstat_named_t	arcstat_hdr_size;
	kstat_named_t	arcstat_l2_hits;
	kstat_named_t	arcstat_l2_misses;
	kstat_named_t	arcstat_l2_feeds;
	kstat_named_t	arcstat_l2_rw_clash;
	kstat_named_t	arcstat_l2_writes_sent;
	kstat_named_t	arcstat_l2_writes_done;
	kstat_named_t	arcstat_l2_writes_error;
	kstat_named_t	arcstat_l2_writes_hdr_miss;
	kstat_named_t	arcstat_l2_evict_lock_retry;
	kstat_named_t	arcstat_l2_evict_reading;
	kstat_named_t	arcstat_l2_free_on_write;
	kstat_named_t	arcstat_l2_abort_lowmem;
	kstat_named_t	arcstat_l2_cksum_bad;
	kstat_named_t	arcstat_l2_io_error;
	kstat_named_t	arcstat_l2_size;
	kstat_named_t	arcstat_l2_hdr_size;
	kstat_named_t	arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

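/*
 * ARCSTAT_MAX updates a "max" statistic without taking a lock: it
 * re-reads the current value and retries the compare-and-swap until
 * either the stored value is already >= val or the CAS succeeds.
 */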
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}
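
/*
 * For example, the call in arc_buf_add_ref() below,
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps arcstat_demand_metadata_hits for a demand read of metadata.
 */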

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

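/*
 * Each lock below is padded out to HT_LOCK_PAD (64) bytes, presumably so
 * that every ht_lock occupies its own cache line and the BUF_LOCKS
 * mutexes do not false-share; the code itself only defines the pad.
 */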
struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

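/*
 * Hash a buffer's identity -- its spa, DVA and birth txg -- into an index
 * for the hash table: a CRC64 over the DVA bytes, folded together with the
 * spa pointer and the birth value.
 */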
static uint64_t
buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}


/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
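	/*
	 * Worked example (assuming 8-byte pointers): with 4 GB of physical
	 * memory, the loop below stops at hsize = 64K entries, since
	 * 64K * 64K = 4 GB, giving a 512 KB table -- the 128 KB per GB
	 * figure quoted above.
	 */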
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

937 | ||
938 | static void | |
939 | add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) | |
940 | { | |
941 | ASSERT(MUTEX_HELD(hash_lock)); | |
942 | ||
943 | if ((refcount_add(&ab->b_refcnt, tag) == 1) && | |
944 | (ab->b_state != arc_anon)) { | |
945 | uint64_t delta = ab->b_size * ab->b_datacnt; | |
946 | list_t *list = &ab->b_state->arcs_list[ab->b_type]; | |
947 | uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; | |
948 | ||
949 | ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); | |
950 | mutex_enter(&ab->b_state->arcs_mtx); | |
951 | ASSERT(list_link_active(&ab->b_arc_node)); | |
952 | list_remove(list, ab); | |
953 | if (GHOST_STATE(ab->b_state)) { | |
954 | ASSERT3U(ab->b_datacnt, ==, 0); | |
955 | ASSERT3P(ab->b_buf, ==, NULL); | |
956 | delta = ab->b_size; | |
957 | } | |
958 | ASSERT(delta > 0); | |
959 | ASSERT3U(*size, >=, delta); | |
960 | atomic_add_64(size, -delta); | |
961 | mutex_exit(&ab->b_state->arcs_mtx); | |
b128c09f | 962 | /* remove the prefetch flag if we get a reference */ |
34dc7c2f BB |
963 | if (ab->b_flags & ARC_PREFETCH) |
964 | ab->b_flags &= ~ARC_PREFETCH; | |
965 | } | |
966 | } | |
967 | ||
968 | static int | |
969 | remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) | |
970 | { | |
971 | int cnt; | |
972 | arc_state_t *state = ab->b_state; | |
973 | ||
974 | ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); | |
975 | ASSERT(!GHOST_STATE(state)); | |
976 | ||
977 | if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && | |
978 | (state != arc_anon)) { | |
979 | uint64_t *size = &state->arcs_lsize[ab->b_type]; | |
980 | ||
981 | ASSERT(!MUTEX_HELD(&state->arcs_mtx)); | |
982 | mutex_enter(&state->arcs_mtx); | |
983 | ASSERT(!list_link_active(&ab->b_arc_node)); | |
984 | list_insert_head(&state->arcs_list[ab->b_type], ab); | |
985 | ASSERT(ab->b_datacnt > 0); | |
986 | atomic_add_64(size, ab->b_size * ab->b_datacnt); | |
987 | mutex_exit(&state->arcs_mtx); | |
988 | } | |
989 | return (cnt); | |
990 | } | |
991 | ||
/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 *
			 * The hdr may be removed from l2ad_buflist before we
			 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			if (hdr->b_l2hdr != NULL) {
				list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
				    hdr);
			}
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
1442 | static void * | |
1443 | arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, | |
1444 | arc_buf_contents_t type) | |
1445 | { | |
1446 | arc_state_t *evicted_state; | |
1447 | uint64_t bytes_evicted = 0, skipped = 0, missed = 0; | |
1448 | arc_buf_hdr_t *ab, *ab_prev = NULL; | |
1449 | list_t *list = &state->arcs_list[type]; | |
1450 | kmutex_t *hash_lock; | |
1451 | boolean_t have_lock; | |
1452 | void *stolen = NULL; | |
1453 | ||
1454 | ASSERT(state == arc_mru || state == arc_mfu); | |
1455 | ||
1456 | evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
1457 | ||
1458 | mutex_enter(&state->arcs_mtx); | |
1459 | mutex_enter(&evicted_state->arcs_mtx); | |
1460 | ||
1461 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1462 | ab_prev = list_prev(list, ab); | |
1463 | /* prefetch buffers have a minimum lifespan */ | |
1464 | if (HDR_IO_IN_PROGRESS(ab) || | |
1465 | (spa && ab->b_spa != spa) || | |
1466 | (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && | |
1467 | lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) { | |
1468 | skipped++; | |
1469 | continue; | |
1470 | } | |
1471 | /* "lookahead" for better eviction candidate */ | |
1472 | if (recycle && ab->b_size != bytes && | |
1473 | ab_prev && ab_prev->b_size == bytes) | |
1474 | continue; | |
1475 | hash_lock = HDR_LOCK(ab); | |
1476 | have_lock = MUTEX_HELD(hash_lock); | |
1477 | if (have_lock || mutex_tryenter(hash_lock)) { | |
1478 | ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); | |
1479 | ASSERT(ab->b_datacnt > 0); | |
1480 | while (ab->b_buf) { | |
1481 | arc_buf_t *buf = ab->b_buf; | |
1482 | if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { |
1483 | missed += 1; | |
1484 | break; | |
1485 | } | |
1486 | if (buf->b_data) { |
1487 | bytes_evicted += ab->b_size; | |
1488 | if (recycle && ab->b_type == type && | |
1489 | ab->b_size == bytes && | |
1490 | !HDR_L2_WRITING(ab)) { | |
1491 | stolen = buf->b_data; | |
1492 | recycle = FALSE; | |
1493 | } | |
1494 | } | |
1495 | if (buf->b_efunc) { | |
1496 | mutex_enter(&arc_eviction_mtx); | |
1497 | arc_buf_destroy(buf, | |
1498 | buf->b_data == stolen, FALSE); | |
1499 | ab->b_buf = buf->b_next; | |
1500 | buf->b_hdr = &arc_eviction_hdr; | |
1501 | buf->b_next = arc_eviction_list; | |
1502 | arc_eviction_list = buf; | |
1503 | mutex_exit(&arc_eviction_mtx); | |
1504 | rw_exit(&buf->b_lock);
1505 | } else {
1506 | rw_exit(&buf->b_lock);
1507 | arc_buf_destroy(buf, |
1508 | buf->b_data == stolen, TRUE); | |
1509 | } | |
1510 | } | |
1511 | if (ab->b_datacnt == 0) { |
1512 | arc_change_state(evicted_state, ab, hash_lock); | |
1513 | ASSERT(HDR_IN_HASH_TABLE(ab)); | |
1514 | ab->b_flags |= ARC_IN_HASH_TABLE; | |
1515 | ab->b_flags &= ~ARC_BUF_AVAILABLE; | |
1516 | DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); | |
1517 | } | |
1518 | if (!have_lock) |
1519 | mutex_exit(hash_lock); | |
1520 | if (bytes >= 0 && bytes_evicted >= bytes) | |
1521 | break; | |
1522 | } else { | |
1523 | missed += 1; | |
1524 | } | |
1525 | } | |
1526 | ||
1527 | mutex_exit(&evicted_state->arcs_mtx); | |
1528 | mutex_exit(&state->arcs_mtx); | |
1529 | ||
1530 | if (bytes_evicted < bytes) | |
1531 | dprintf("only evicted %lld bytes from %x", | |
1532 | (longlong_t)bytes_evicted, state); | |
1533 | ||
1534 | if (skipped) | |
1535 | ARCSTAT_INCR(arcstat_evict_skip, skipped); | |
1536 | ||
1537 | if (missed) | |
1538 | ARCSTAT_INCR(arcstat_mutex_miss, missed); | |
1539 | ||
1540 | /* | |
1541 | * We have just evicted some data into the ghost state; make | |
1542 | * sure we also adjust the ghost state size if necessary. | |
1543 | */ | |
1544 | if (arc_no_grow && | |
1545 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { | |
1546 | int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + | |
1547 | arc_mru_ghost->arcs_size - arc_c; | |
1548 | ||
1549 | if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { | |
1550 | int64_t todelete = | |
1551 | MIN(arc_mru_ghost->arcs_lsize[type], mru_over); | |
1552 | arc_evict_ghost(arc_mru_ghost, NULL, todelete); | |
1553 | } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { | |
1554 | int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], | |
1555 | arc_mru_ghost->arcs_size + | |
1556 | arc_mfu_ghost->arcs_size - arc_c); | |
1557 | arc_evict_ghost(arc_mfu_ghost, NULL, todelete); | |
1558 | } | |
1559 | } | |
1560 | ||
1561 | return (stolen); | |
1562 | } | |
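The recycle flag described above is easiest to see from the caller's side. A minimal sketch, assuming a hypothetical helper (example_recycle_alloc is not in the source; arc_get_data_buf() below is the real caller):

/* Hypothetical illustration of the recycle contract of arc_evict(). */
static void *
example_recycle_alloc(uint64_t size, arc_buf_contents_t type)
{
	/* try to steal a same-sized evictable data block */
	void *data = arc_evict(arc_mfu, NULL, size, TRUE, type);

	if (data == NULL) {
		/* best effort failed; fall back to a fresh allocation */
		data = (type == ARC_BUFC_METADATA) ?
		    zio_buf_alloc(size) : zio_data_buf_alloc(size);
	}
	return (data);
}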
1563 | ||
1564 | /* | |
1565 | * Remove buffers from list until we've removed the specified number of | |
1566 | * bytes. Destroy the buffers that are removed. | |
1567 | */ | |
1568 | static void | |
1569 | arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) | |
1570 | { | |
1571 | arc_buf_hdr_t *ab, *ab_prev; | |
1572 | list_t *list = &state->arcs_list[ARC_BUFC_DATA]; | |
1573 | kmutex_t *hash_lock; | |
1574 | uint64_t bytes_deleted = 0; | |
1575 | uint64_t bufs_skipped = 0; | |
1576 | ||
1577 | ASSERT(GHOST_STATE(state)); | |
1578 | top: | |
1579 | mutex_enter(&state->arcs_mtx); | |
1580 | for (ab = list_tail(list); ab; ab = ab_prev) { | |
1581 | ab_prev = list_prev(list, ab); | |
1582 | if (spa && ab->b_spa != spa) | |
1583 | continue; | |
1584 | hash_lock = HDR_LOCK(ab); | |
1585 | if (mutex_tryenter(hash_lock)) { | |
1586 | ASSERT(!HDR_IO_IN_PROGRESS(ab)); | |
1587 | ASSERT(ab->b_buf == NULL); | |
1588 | ARCSTAT_BUMP(arcstat_deleted); | |
1589 | bytes_deleted += ab->b_size; | |
1590 | ||
1591 | if (ab->b_l2hdr != NULL) { | |
1592 | /* | |
1593 | * This buffer is cached on the 2nd Level ARC; | |
1594 | * don't destroy the header. | |
1595 | */ | |
1596 | arc_change_state(arc_l2c_only, ab, hash_lock); | |
1597 | mutex_exit(hash_lock); | |
1598 | } else { | |
1599 | arc_change_state(arc_anon, ab, hash_lock); | |
1600 | mutex_exit(hash_lock); | |
1601 | arc_hdr_destroy(ab); | |
1602 | } | |
1603 | ||
1604 | DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); | |
1605 | if (bytes >= 0 && bytes_deleted >= bytes) | |
1606 | break; | |
1607 | } else { | |
1608 | if (bytes < 0) { | |
1609 | mutex_exit(&state->arcs_mtx); | |
1610 | mutex_enter(hash_lock); | |
1611 | mutex_exit(hash_lock); | |
1612 | goto top; | |
1613 | } | |
1614 | bufs_skipped += 1; | |
1615 | } | |
1616 | } | |
1617 | mutex_exit(&state->arcs_mtx); | |
1618 | ||
1619 | if (list == &state->arcs_list[ARC_BUFC_DATA] && | |
1620 | (bytes < 0 || bytes_deleted < bytes)) { | |
1621 | list = &state->arcs_list[ARC_BUFC_METADATA]; | |
1622 | goto top; | |
1623 | } | |
1624 | ||
1625 | if (bufs_skipped) { | |
1626 | ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); | |
1627 | ASSERT(bytes >= 0); | |
1628 | } | |
1629 | ||
1630 | if (bytes_deleted < bytes) | |
1631 | dprintf("only deleted %lld bytes from %p", | |
1632 | (longlong_t)bytes_deleted, state); | |
1633 | } | |
1634 | ||
1635 | static void | |
1636 | arc_adjust(void) | |
1637 | { | |
1638 | int64_t top_sz, mru_over, arc_over, todelete; | |
1639 | ||
1640 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; | |
1641 | ||
1642 | if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { | |
1643 | int64_t toevict = | |
1644 | MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); | |
1645 | (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); | |
1646 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size; | |
1647 | } | |
1648 | ||
1649 | if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { | |
1650 | int64_t toevict = | |
1651 | MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); | |
1652 | (void) arc_evict(arc_mru, NULL, toevict, FALSE, | |
1653 | ARC_BUFC_METADATA); | |
1654 | top_sz = arc_anon->arcs_size + arc_mru->arcs_size; | |
1655 | } | |
1656 | ||
1657 | mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; | |
1658 | ||
1659 | if (mru_over > 0) { | |
1660 | if (arc_mru_ghost->arcs_size > 0) { | |
1661 | todelete = MIN(arc_mru_ghost->arcs_size, mru_over); | |
1662 | arc_evict_ghost(arc_mru_ghost, NULL, todelete); | |
1663 | } | |
1664 | } | |
1665 | ||
1666 | if ((arc_over = arc_size - arc_c) > 0) { | |
1667 | int64_t tbl_over; | |
1668 | ||
1669 | if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { | |
1670 | int64_t toevict = | |
1671 | MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); | |
1672 | (void) arc_evict(arc_mfu, NULL, toevict, FALSE, | |
1673 | ARC_BUFC_DATA); | |
1674 | arc_over = arc_size - arc_c; | |
1675 | } | |
1676 | ||
1677 | if (arc_over > 0 && | |
1678 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { | |
1679 | int64_t toevict = | |
1680 | MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], | |
1681 | arc_over); | |
1682 | (void) arc_evict(arc_mfu, NULL, toevict, FALSE, | |
1683 | ARC_BUFC_METADATA); | |
1684 | } | |
1685 | ||
1686 | tbl_over = arc_size + arc_mru_ghost->arcs_size + | |
1687 | arc_mfu_ghost->arcs_size - arc_c * 2; | |
1688 | ||
1689 | if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { | |
1690 | todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); | |
1691 | arc_evict_ghost(arc_mfu_ghost, NULL, todelete); | |
1692 | } | |
1693 | } | |
1694 | } | |
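To make the adjustment arithmetic concrete, a worked example with invented sizes (and ignoring the recomputation of top_sz after each step):

/*
 * Example: arc_c = 1024MB, arc_p = 512MB, arc_anon = 100MB,
 * arc_mru = 450MB, arc_meta_used = 50MB, arc_mru_ghost = 500MB.
 *
 * top_sz = 100 + 450 + 50 = 600MB > arc_p, so up to
 * 600 - 512 = 88MB of evictable arc_mru data is evicted first.
 * mru_over = 600 + 500 - 1024 = 76MB > 0, so up to 76MB of
 * arc_mru_ghost headers are deleted.  If arc_size still exceeds
 * arc_c, the remainder comes out of arc_mfu, and the ghost lists
 * are trimmed so the total stays under 2 * arc_c.
 */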
1695 | ||
1696 | static void | |
1697 | arc_do_user_evicts(void) | |
1698 | { | |
1699 | mutex_enter(&arc_eviction_mtx); | |
1700 | while (arc_eviction_list != NULL) { | |
1701 | arc_buf_t *buf = arc_eviction_list; | |
1702 | arc_eviction_list = buf->b_next; | |
1703 | rw_enter(&buf->b_lock, RW_WRITER);
1704 | buf->b_hdr = NULL;
1705 | rw_exit(&buf->b_lock);
1706 | mutex_exit(&arc_eviction_mtx); |
1707 | ||
1708 | if (buf->b_efunc != NULL) | |
1709 | VERIFY(buf->b_efunc(buf) == 0); | |
1710 | ||
1711 | buf->b_efunc = NULL; | |
1712 | buf->b_private = NULL; | |
1713 | kmem_cache_free(buf_cache, buf); | |
1714 | mutex_enter(&arc_eviction_mtx); | |
1715 | } | |
1716 | mutex_exit(&arc_eviction_mtx); | |
1717 | } | |
1718 | ||
1719 | /* | |
1720 | * Flush all *evictable* data from the cache for the given spa. | |
1721 | * NOTE: this will not touch "active" (i.e. referenced) data. | |
1722 | */ | |
1723 | void | |
1724 | arc_flush(spa_t *spa) | |
1725 | { | |
1726 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { | |
1727 | (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); | |
1728 | if (spa) | |
1729 | break; | |
1730 | } | |
1731 | while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { | |
1732 | (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); | |
1733 | if (spa) | |
1734 | break; | |
1735 | } | |
1736 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { | |
1737 | (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); | |
1738 | if (spa) | |
1739 | break; | |
1740 | } | |
1741 | while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { | |
1742 | (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); | |
1743 | if (spa) | |
1744 | break; | |
1745 | } | |
1746 | ||
1747 | arc_evict_ghost(arc_mru_ghost, spa, -1); | |
1748 | arc_evict_ghost(arc_mfu_ghost, spa, -1); | |
1749 | ||
1750 | mutex_enter(&arc_reclaim_thr_lock); | |
1751 | arc_do_user_evicts(); | |
1752 | mutex_exit(&arc_reclaim_thr_lock); | |
1753 | ASSERT(spa || arc_eviction_list == NULL); | |
1754 | } | |
1755 | ||
1756 | int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ | |
1757 | ||
1758 | void | |
1759 | arc_shrink(void) | |
1760 | { | |
1761 | if (arc_c > arc_c_min) { | |
1762 | uint64_t to_free; | |
1763 | ||
1764 | #ifdef _KERNEL | |
1765 | to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree)); | |
1766 | #else | |
1767 | to_free = arc_c >> arc_shrink_shift; | |
1768 | #endif | |
1769 | if (arc_c > arc_c_min + to_free) | |
1770 | atomic_add_64(&arc_c, -to_free); | |
1771 | else | |
1772 | arc_c = arc_c_min; | |
1773 | ||
1774 | atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); | |
1775 | if (arc_c > arc_size) | |
1776 | arc_c = MAX(arc_size, arc_c_min); | |
1777 | if (arc_p > arc_c) | |
1778 | arc_p = (arc_c >> 1); | |
1779 | ASSERT(arc_c >= arc_c_min); | |
1780 | ASSERT((int64_t)arc_p >= 0); | |
1781 | } | |
1782 | ||
1783 | if (arc_size > arc_c) | |
1784 | arc_adjust(); | |
1785 | } | |
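For example (illustrative numbers, user-land case where needfree does not apply): with the default arc_shrink_shift of 5, each call releases 1/32nd of the target size:

/*
 * arc_c = 4096MB  =>  to_free = 4096MB >> 5 = 128MB,
 * so arc_c drops to 3968MB (never below arc_c_min), and arc_p
 * is likewise cut by arc_p >> 5, 1/32nd of its own value.
 */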
1786 | ||
1787 | static int | |
1788 | arc_reclaim_needed(void) | |
1789 | { | |
1790 | uint64_t extra; | |
1791 | ||
1792 | #ifdef _KERNEL | |
1793 | ||
1794 | if (needfree) | |
1795 | return (1); | |
1796 | ||
1797 | /* | |
1798 | * take 'desfree' extra pages, so we reclaim sooner, rather than later | |
1799 | */ | |
1800 | extra = desfree; | |
1801 | ||
1802 | /* | |
1803 | * check that we're out of range of the pageout scanner. It starts to | |
1804 | * schedule paging if freemem is less than lotsfree and needfree. | |
1805 | * lotsfree is the high-water mark for pageout, and needfree is the | |
1806 | * number of needed free pages. We add extra pages here to make sure | |
1807 | * the scanner doesn't start up while we're freeing memory. | |
1808 | */ | |
1809 | if (freemem < lotsfree + needfree + extra) | |
1810 | return (1); | |
1811 | ||
1812 | /* | |
1813 | * check to make sure that swapfs has enough space so that anon | |
1814 | * reservations can still succeed. anon_resvmem() checks that the | |
1815 | * availrmem is greater than swapfs_minfree, and the number of reserved | |
1816 | * swap pages. We also add a bit of extra here just to prevent | |
1817 | * circumstances from getting really dire. | |
1818 | */ | |
1819 | if (availrmem < swapfs_minfree + swapfs_reserve + extra) | |
1820 | return (1); | |
1821 | ||
1822 | #if defined(__i386) | |
1823 | /* | |
1824 | * If we're on an i386 platform, it's possible that we'll exhaust the | |
1825 | * kernel heap space before we ever run out of available physical | |
1826 | * memory. Most checks of the size of the heap_area compare against | |
1827 | * tune.t_minarmem, which is the minimum available real memory that we | |
1828 | * can have in the system. However, this is generally fixed at 25 pages | |
1829 | * which is so low that it's useless. In this comparison, we seek to | |
1830 | * calculate the total heap-size, and reclaim if more than 3/4ths of the | |
1831 | * heap is allocated. (Or, in the calculation, if less than 1/4th is | |
1832 | * free) | |
1833 | */ | |
1834 | if (btop(vmem_size(heap_arena, VMEM_FREE)) < | |
1835 | (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) | |
1836 | return (1); | |
1837 | #endif | |
1838 | ||
1839 | #else | |
1840 | if (spa_get_random(100) == 0) | |
1841 | return (1); | |
1842 | #endif | |
1843 | return (0); | |
1844 | } | |
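A condensed restatement of the kernel-side thresholds above (a sketch only: example_low_memory is hypothetical, and the i386 heap test is omitted):

static int
example_low_memory(void)
{
	uint64_t extra = desfree;	/* reclaim early rather than late */

	return (needfree != 0 ||
	    freemem < lotsfree + needfree + extra ||
	    availrmem < swapfs_minfree + swapfs_reserve + extra);
}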
1845 | ||
1846 | static void | |
1847 | arc_kmem_reap_now(arc_reclaim_strategy_t strat) | |
1848 | { | |
1849 | size_t i; | |
1850 | kmem_cache_t *prev_cache = NULL; | |
1851 | kmem_cache_t *prev_data_cache = NULL; | |
1852 | extern kmem_cache_t *zio_buf_cache[]; | |
1853 | extern kmem_cache_t *zio_data_buf_cache[]; | |
1854 | ||
1855 | #ifdef _KERNEL | |
1856 | if (arc_meta_used >= arc_meta_limit) { | |
1857 | /* | |
1858 | * We are exceeding our meta-data cache limit. | |
1859 | * Purge some DNLC entries to release holds on meta-data. | |
1860 | */ | |
1861 | dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); | |
1862 | } | |
1863 | #if defined(__i386) | |
1864 | /* | |
1865 | * Reclaim unused memory from all kmem caches. | |
1866 | */ | |
1867 | kmem_reap(); | |
1868 | #endif | |
1869 | #endif | |
1870 | ||
1871 | /* | |
1872 | * An aggressive reclamation will shrink the cache size as well as | |
1873 | * reap free buffers from the arc kmem caches. | |
1874 | */ | |
1875 | if (strat == ARC_RECLAIM_AGGR) | |
1876 | arc_shrink(); | |
1877 | ||
1878 | for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { | |
1879 | if (zio_buf_cache[i] != prev_cache) { | |
1880 | prev_cache = zio_buf_cache[i]; | |
1881 | kmem_cache_reap_now(zio_buf_cache[i]); | |
1882 | } | |
1883 | if (zio_data_buf_cache[i] != prev_data_cache) { | |
1884 | prev_data_cache = zio_data_buf_cache[i]; | |
1885 | kmem_cache_reap_now(zio_data_buf_cache[i]); | |
1886 | } | |
1887 | } | |
1888 | kmem_cache_reap_now(buf_cache); | |
1889 | kmem_cache_reap_now(hdr_cache); | |
1890 | } | |
1891 | ||
1892 | static void | |
1893 | arc_reclaim_thread(void) | |
1894 | { | |
1895 | clock_t growtime = 0; | |
1896 | arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; | |
1897 | callb_cpr_t cpr; | |
1898 | ||
1899 | CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); | |
1900 | ||
1901 | mutex_enter(&arc_reclaim_thr_lock); | |
1902 | while (arc_thread_exit == 0) { | |
1903 | if (arc_reclaim_needed()) { | |
1904 | ||
1905 | if (arc_no_grow) { | |
1906 | if (last_reclaim == ARC_RECLAIM_CONS) { | |
1907 | last_reclaim = ARC_RECLAIM_AGGR; | |
1908 | } else { | |
1909 | last_reclaim = ARC_RECLAIM_CONS; | |
1910 | } | |
1911 | } else { | |
1912 | arc_no_grow = TRUE; | |
1913 | last_reclaim = ARC_RECLAIM_AGGR; | |
1914 | membar_producer(); | |
1915 | } | |
1916 | ||
1917 | /* reset the growth delay for every reclaim */ | |
1918 | growtime = lbolt + (arc_grow_retry * hz); | |
1919 | ||
1920 | arc_kmem_reap_now(last_reclaim); | |
1921 | arc_warm = B_TRUE;
1922 | |
1923 | } else if (arc_no_grow && lbolt >= growtime) { | |
1924 | arc_no_grow = FALSE; | |
1925 | } | |
1926 | ||
1927 | if (2 * arc_c < arc_size + | |
1928 | arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size) | |
1929 | arc_adjust(); | |
1930 | ||
1931 | if (arc_eviction_list != NULL) | |
1932 | arc_do_user_evicts(); | |
1933 | ||
1934 | /* block until needed, or one second, whichever is shorter */ | |
1935 | CALLB_CPR_SAFE_BEGIN(&cpr); | |
1936 | (void) cv_timedwait(&arc_reclaim_thr_cv, | |
1937 | &arc_reclaim_thr_lock, (lbolt + hz)); | |
1938 | CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); | |
1939 | } | |
1940 | ||
1941 | arc_thread_exit = 0; | |
1942 | cv_broadcast(&arc_reclaim_thr_cv); | |
1943 | CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ | |
1944 | thread_exit(); | |
1945 | } | |
1946 | ||
1947 | /* | |
1948 | * Adapt arc info given the number of bytes we are trying to add and | |
1949 | * the state that we are coming from. This function is only called | |
1950 | * when we are adding new content to the cache. | |
1951 | */ | |
1952 | static void | |
1953 | arc_adapt(int bytes, arc_state_t *state) | |
1954 | { | |
1955 | int mult; | |
1956 | ||
1957 | if (state == arc_l2c_only) | |
1958 | return; | |
1959 | ||
1960 | ASSERT(bytes > 0); | |
1961 | /* | |
1962 | * Adapt the target size of the MRU list: | |
1963 | * - if we just hit in the MRU ghost list, then increase | |
1964 | * the target size of the MRU list. | |
1965 | * - if we just hit in the MFU ghost list, then increase | |
1966 | * the target size of the MFU list by decreasing the | |
1967 | * target size of the MRU list. | |
1968 | */ | |
1969 | if (state == arc_mru_ghost) { | |
1970 | mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? | |
1971 | 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); | |
1972 | ||
1973 | arc_p = MIN(arc_c, arc_p + bytes * mult); | |
1974 | } else if (state == arc_mfu_ghost) { | |
1975 | mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? | |
1976 | 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); | |
1977 | ||
1978 | arc_p = MAX(0, (int64_t)arc_p - bytes * mult); | |
1979 | } | |
1980 | ASSERT((int64_t)arc_p >= 0); | |
1981 | ||
1982 | if (arc_reclaim_needed()) { | |
1983 | cv_signal(&arc_reclaim_thr_cv); | |
1984 | return; | |
1985 | } | |
1986 | ||
1987 | if (arc_no_grow) | |
1988 | return; | |
1989 | ||
1990 | if (arc_c >= arc_c_max) | |
1991 | return; | |
1992 | ||
1993 | /* | |
1994 | * If we're within (2 * maxblocksize) bytes of the target | |
1995 | * cache size, increment the target cache size | |
1996 | */ | |
1997 | if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { | |
1998 | atomic_add_64(&arc_c, (int64_t)bytes); | |
1999 | if (arc_c > arc_c_max) | |
2000 | arc_c = arc_c_max; | |
2001 | else if (state == arc_anon) | |
2002 | atomic_add_64(&arc_p, (int64_t)bytes); | |
2003 | if (arc_p > arc_c) | |
2004 | arc_p = arc_c; | |
2005 | } | |
2006 | ASSERT((int64_t)arc_p >= 0); | |
2007 | } | |
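A worked example of the ghost-hit multiplier (sizes invented): suppose a 16K read hits in the MRU ghost list while arc_mru_ghost->arcs_size is 64MB and arc_mfu_ghost->arcs_size is 256MB.

/*
 * mult = 256MB / 64MB = 4, so arc_p grows by 16K * 4 = 64K
 * (capped at arc_c).  The smaller a ghost list is relative to
 * its sibling, the harder each hit in it pushes arc_p; MFU
 * ghost hits shrink arc_p symmetrically.
 */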
2008 | ||
2009 | /* | |
2010 | * Check if the cache has reached its limits and eviction is required | |
2011 | * prior to insert. | |
2012 | */ | |
2013 | static int | |
2014 | arc_evict_needed(arc_buf_contents_t type) | |
2015 | { | |
2016 | if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) | |
2017 | return (1); | |
2018 | ||
2019 | #ifdef _KERNEL | |
2020 | /* | |
2021 | * If zio data pages are being allocated out of a separate heap segment, | |
2022 | * then enforce that the size of available vmem for this area remains | |
2023 | * above about 1/32nd free. | |
2024 | */ | |
2025 | if (type == ARC_BUFC_DATA && zio_arena != NULL && | |
2026 | vmem_size(zio_arena, VMEM_FREE) < | |
2027 | (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) | |
2028 | return (1); | |
2029 | #endif | |
2030 | ||
2031 | if (arc_reclaim_needed()) | |
2032 | return (1); | |
2033 | ||
2034 | return (arc_size > arc_c); | |
2035 | } | |
2036 | ||
2037 | /* | |
2038 | * The buffer, supplied as the first argument, needs a data block. | |
2039 | * So, if we are at cache max, determine which cache should be victimized. | |
2040 | * We have the following cases: | |
2041 | * | |
2042 | * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> | |
2043 | * In this situation if we're out of space, but the resident size of the MFU is | |
2044 | * under the limit, victimize the MFU cache to satisfy this insertion request. | |
2045 | * | |
2046 | * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> | |
2047 | * Here, we've used up all of the available space for the MRU, so we need to | |
2048 | * evict from our own cache instead. Evict from the set of resident MRU | |
2049 | * entries. | |
2050 | * | |
2051 | * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> | |
2052 | * c minus p represents the MFU space in the cache, since p is the size of the | |
2053 | * cache that is dedicated to the MRU. In this situation there's still space on | |
2054 | * the MFU side, so the MRU side needs to be victimized. | |
2055 | * | |
2056 | * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> | |
2057 | * MFU's resident set is consuming more space than it has been allotted. In | |
2058 | * this situation, we must victimize our own cache, the MFU, for this insertion. | |
2059 | */ | |
2060 | static void | |
2061 | arc_get_data_buf(arc_buf_t *buf) | |
2062 | { | |
2063 | arc_state_t *state = buf->b_hdr->b_state; | |
2064 | uint64_t size = buf->b_hdr->b_size; | |
2065 | arc_buf_contents_t type = buf->b_hdr->b_type; | |
2066 | ||
2067 | arc_adapt(size, state); | |
2068 | ||
2069 | /* | |
2070 | * We have not yet reached cache maximum size, | |
2071 | * just allocate a new buffer. | |
2072 | */ | |
2073 | if (!arc_evict_needed(type)) { | |
2074 | if (type == ARC_BUFC_METADATA) { | |
2075 | buf->b_data = zio_buf_alloc(size); | |
2076 | arc_space_consume(size); | |
2077 | } else { | |
2078 | ASSERT(type == ARC_BUFC_DATA); | |
2079 | buf->b_data = zio_data_buf_alloc(size); | |
2080 | atomic_add_64(&arc_size, size); | |
2081 | } | |
2082 | goto out; | |
2083 | } | |
2084 | ||
2085 | /* | |
2086 | * If we are prefetching from the mfu ghost list, this buffer | |
2087 | * will end up on the mru list; so steal space from there. | |
2088 | */ | |
2089 | if (state == arc_mfu_ghost) | |
2090 | state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; | |
2091 | else if (state == arc_mru_ghost) | |
2092 | state = arc_mru; | |
2093 | ||
2094 | if (state == arc_mru || state == arc_anon) { | |
2095 | uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; | |
2096 | state = (arc_mfu->arcs_lsize[type] > 0 && | |
2097 | arc_p > mru_used) ? arc_mfu : arc_mru; | |
2098 | } else { | |
2099 | /* MFU cases */ | |
2100 | uint64_t mfu_space = arc_c - arc_p; | |
2101 | state = (arc_mru->arcs_lsize[type] > 0 && | |
2102 | mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; | |
2103 | } | |
2104 | if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { | |
2105 | if (type == ARC_BUFC_METADATA) { | |
2106 | buf->b_data = zio_buf_alloc(size); | |
2107 | arc_space_consume(size); | |
2108 | } else { | |
2109 | ASSERT(type == ARC_BUFC_DATA); | |
2110 | buf->b_data = zio_data_buf_alloc(size); | |
2111 | atomic_add_64(&arc_size, size); | |
2112 | } | |
2113 | ARCSTAT_BUMP(arcstat_recycle_miss); | |
2114 | } | |
2115 | ASSERT(buf->b_data != NULL); | |
2116 | out: | |
2117 | /* | |
2118 | * Update the state size. Note that ghost states have a | |
2119 | * "ghost size" and so don't need to be updated. | |
2120 | */ | |
2121 | if (!GHOST_STATE(buf->b_hdr->b_state)) { | |
2122 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2123 | ||
2124 | atomic_add_64(&hdr->b_state->arcs_size, size); | |
2125 | if (list_link_active(&hdr->b_arc_node)) { | |
2126 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2127 | atomic_add_64(&hdr->b_state->arcs_lsize[type], size); | |
2128 | } | |
2129 | /* | |
2130 | * If we are growing the cache, and we are adding anonymous | |
2131 | * data, and we have outgrown arc_p, update arc_p | |
2132 | */ | |
2133 | if (arc_size < arc_c && hdr->b_state == arc_anon && | |
2134 | arc_anon->arcs_size + arc_mru->arcs_size > arc_p) | |
2135 | arc_p = MIN(arc_c, arc_p + size); | |
2136 | } | |
2137 | } | |
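The four cases above boil down to two headroom tests. The following hypothetical helper (example_pick_victim is not in the source) restates the selection arc_get_data_buf() just made inline:

static arc_state_t *
example_pick_victim(arc_state_t *state, arc_buf_contents_t type)
{
	if (state == arc_mru || state == arc_anon) {
		/* cases 1 and 2: is the MRU side still under arc_p? */
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;

		return ((arc_mfu->arcs_lsize[type] > 0 && arc_p > mru_used) ?
		    arc_mfu : arc_mru);
	} else {
		/* cases 3 and 4: is the MFU side still under c - p? */
		uint64_t mfu_space = arc_c - arc_p;

		return ((arc_mru->arcs_lsize[type] > 0 &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu);
	}
}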
2138 | ||
2139 | /* | |
2140 | * This routine is called whenever a buffer is accessed. | |
2141 | * NOTE: the hash lock is dropped in this function. | |
2142 | */ | |
2143 | static void | |
2144 | arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) | |
2145 | { | |
2146 | ASSERT(MUTEX_HELD(hash_lock)); | |
2147 | ||
2148 | if (buf->b_state == arc_anon) { | |
2149 | /* | |
2150 | * This buffer is not in the cache, and does not | |
2151 | * appear in our "ghost" list. Add the new buffer | |
2152 | * to the MRU state. | |
2153 | */ | |
2154 | ||
2155 | ASSERT(buf->b_arc_access == 0); | |
2156 | buf->b_arc_access = lbolt; | |
2157 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2158 | arc_change_state(arc_mru, buf, hash_lock); | |
2159 | ||
2160 | } else if (buf->b_state == arc_mru) { | |
2161 | /* | |
2162 | * If this buffer is here because of a prefetch, then either: | |
2163 | * - clear the flag if this is a "referencing" read | |
2164 | * (any subsequent access will bump this into the MFU state). | |
2165 | * or | |
2166 | * - move the buffer to the head of the list if this is | |
2167 | * another prefetch (to make it less likely to be evicted). | |
2168 | */ | |
2169 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2170 | if (refcount_count(&buf->b_refcnt) == 0) { | |
2171 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2172 | } else { | |
2173 | buf->b_flags &= ~ARC_PREFETCH; | |
2174 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2175 | } | |
2176 | buf->b_arc_access = lbolt; | |
2177 | return; | |
2178 | } | |
2179 | ||
2180 | /* | |
2181 | * This buffer has been "accessed" only once so far, | |
2182 | * but it is still in the cache. If enough time has | |
2183 | * passed, move it to the MFU state. | |
2184 | */ | |
2185 | if (lbolt > buf->b_arc_access + ARC_MINTIME) { | |
2186 | /* | |
2187 | * More than 125ms have passed since we | |
2188 | * instantiated this buffer. Move it to the | |
2189 | * most frequently used state. | |
2190 | */ | |
2191 | buf->b_arc_access = lbolt; | |
2192 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2193 | arc_change_state(arc_mfu, buf, hash_lock); | |
2194 | } | |
2195 | ARCSTAT_BUMP(arcstat_mru_hits); | |
2196 | } else if (buf->b_state == arc_mru_ghost) { | |
2197 | arc_state_t *new_state; | |
2198 | /* | |
2199 | * This buffer has been "accessed" recently, but | |
2200 | * was evicted from the cache. Move it to the | |
2201 | * MFU state. | |
2202 | */ | |
2203 | ||
2204 | if (buf->b_flags & ARC_PREFETCH) { | |
2205 | new_state = arc_mru; | |
2206 | if (refcount_count(&buf->b_refcnt) > 0) | |
2207 | buf->b_flags &= ~ARC_PREFETCH; | |
2208 | DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); | |
2209 | } else { | |
2210 | new_state = arc_mfu; | |
2211 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2212 | } | |
2213 | ||
2214 | buf->b_arc_access = lbolt; | |
2215 | arc_change_state(new_state, buf, hash_lock); | |
2216 | ||
2217 | ARCSTAT_BUMP(arcstat_mru_ghost_hits); | |
2218 | } else if (buf->b_state == arc_mfu) { | |
2219 | /* | |
2220 | * This buffer has been accessed more than once and is | |
2221 | * still in the cache. Keep it in the MFU state. | |
2222 | * | |
2223 | * NOTE: an add_reference() that occurred when we did | |
2224 | * the arc_read() will have kicked this off the list. | |
2225 | * If it was a prefetch, we will explicitly move it to | |
2226 | * the head of the list now. | |
2227 | */ | |
2228 | if ((buf->b_flags & ARC_PREFETCH) != 0) { | |
2229 | ASSERT(refcount_count(&buf->b_refcnt) == 0); | |
2230 | ASSERT(list_link_active(&buf->b_arc_node)); | |
2231 | } | |
2232 | ARCSTAT_BUMP(arcstat_mfu_hits); | |
2233 | buf->b_arc_access = lbolt; | |
2234 | } else if (buf->b_state == arc_mfu_ghost) { | |
2235 | arc_state_t *new_state = arc_mfu; | |
2236 | /* | |
2237 | * This buffer has been accessed more than once but has | |
2238 | * been evicted from the cache. Move it back to the | |
2239 | * MFU state. | |
2240 | */ | |
2241 | ||
2242 | if (buf->b_flags & ARC_PREFETCH) { | |
2243 | /* | |
2244 | * This is a prefetch access... | |
2245 | * move this block back to the MRU state. | |
2246 | */ | |
2247 | ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); | |
2248 | new_state = arc_mru; | |
2249 | } | |
2250 | ||
2251 | buf->b_arc_access = lbolt; | |
2252 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2253 | arc_change_state(new_state, buf, hash_lock); | |
2254 | ||
2255 | ARCSTAT_BUMP(arcstat_mfu_ghost_hits); | |
2256 | } else if (buf->b_state == arc_l2c_only) { | |
2257 | /* | |
2258 | * This buffer is on the 2nd Level ARC. | |
2259 | */ | |
2260 | ||
2261 | buf->b_arc_access = lbolt; | |
2262 | DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); | |
2263 | arc_change_state(arc_mfu, buf, hash_lock); | |
2264 | } else { | |
2265 | ASSERT(!"invalid arc state"); | |
2266 | } | |
2267 | } | |
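In summary, arc_access() drives the following transitions (a restatement of the code above, not new behavior):

/*
 *	anon      -> mru	first access after insertion
 *	mru       -> mfu	second access, > ARC_MINTIME apart
 *	mru_ghost -> mfu	(or mru, if the access is a prefetch)
 *	mfu       -> mfu	stays put; access time refreshed
 *	mfu_ghost -> mfu	(or mru, if the access is a prefetch)
 *	l2c_only  -> mfu	header revived from the L2ARC
 */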
2268 | ||
2269 | /* a generic arc_done_func_t which you can use */ | |
2270 | /* ARGSUSED */ | |
2271 | void | |
2272 | arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2273 | { | |
2274 | bcopy(buf->b_data, arg, buf->b_hdr->b_size); | |
2275 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2276 | } | |
2277 | ||
2278 | /* a generic arc_done_func_t */ | |
2279 | void | |
2280 | arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) | |
2281 | { | |
2282 | arc_buf_t **bufp = arg; | |
2283 | if (zio && zio->io_error) { | |
2284 | VERIFY(arc_buf_remove_ref(buf, arg) == 1); | |
2285 | *bufp = NULL; | |
2286 | } else { | |
2287 | *bufp = buf; | |
2288 | } | |
2289 | } | |
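For instance, a sketch of a synchronous caller built on arc_bcopy_func() (example_read_block is hypothetical and would need a forward declaration of arc_read_nolock(), which is defined below): block until the read completes, and let the done func copy the data into the caller's buffer.

static int
example_read_block(spa_t *spa, blkptr_t *bp, void *data,
    const zbookmark_t *zb)
{
	uint32_t flags = ARC_WAIT;

	/* arc_bcopy_func() bcopy()s into `data' and drops the ref */
	return (arc_read_nolock(NULL, spa, bp, arc_bcopy_func, data,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb));
}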
2290 | ||
2291 | static void | |
2292 | arc_read_done(zio_t *zio) | |
2293 | { | |
2294 | arc_buf_hdr_t *hdr, *found; | |
2295 | arc_buf_t *buf; | |
2296 | arc_buf_t *abuf; /* buffer we're assigning to callback */ | |
2297 | kmutex_t *hash_lock; | |
2298 | arc_callback_t *callback_list, *acb; | |
2299 | int freeable = FALSE; | |
2300 | ||
2301 | buf = zio->io_private; | |
2302 | hdr = buf->b_hdr; | |
2303 | ||
2304 | /* | |
2305 | * The hdr was inserted into hash-table and removed from lists | |
2306 | * prior to starting I/O. We should find this header, since | |
2307 | * it's in the hash table, and it should be legit since it's | |
2308 | * not possible to evict it during the I/O. The only possible | |
2309 | * reason for it not to be found is if we were freed during the | |
2310 | * read. | |
2311 | */ | |
2312 | found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, | |
2313 | &hash_lock); | |
2314 | ||
2315 | ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || | |
2316 | (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || | |
2317 | (found == hdr && HDR_L2_READING(hdr))); | |
2318 | ||
2319 | hdr->b_flags &= ~ARC_L2_EVICTED;
2320 | if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2321 | hdr->b_flags &= ~ARC_L2CACHE;
2322 | |
2323 | /* byteswap if necessary */ | |
2324 | callback_list = hdr->b_acb; | |
2325 | ASSERT(callback_list != NULL); | |
2326 | if (BP_SHOULD_BYTESWAP(zio->io_bp)) { |
2327 | arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? | |
2328 | byteswap_uint64_array : | |
2329 | dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; | |
2330 | func(buf->b_data, hdr->b_size); | |
2331 | } | |
2332 | |
2333 | arc_cksum_compute(buf, B_FALSE); | |
2334 | ||
2335 | /* create copies of the data buffer for the callers */ | |
2336 | abuf = buf; | |
2337 | for (acb = callback_list; acb; acb = acb->acb_next) { | |
2338 | if (acb->acb_done) { | |
2339 | if (abuf == NULL) | |
2340 | abuf = arc_buf_clone(buf); | |
2341 | acb->acb_buf = abuf; | |
2342 | abuf = NULL; | |
2343 | } | |
2344 | } | |
2345 | hdr->b_acb = NULL; | |
2346 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
2347 | ASSERT(!HDR_BUF_AVAILABLE(hdr)); | |
2348 | if (abuf == buf) | |
2349 | hdr->b_flags |= ARC_BUF_AVAILABLE; | |
2350 | ||
2351 | ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); | |
2352 | ||
2353 | if (zio->io_error != 0) { | |
2354 | hdr->b_flags |= ARC_IO_ERROR; | |
2355 | if (hdr->b_state != arc_anon) | |
2356 | arc_change_state(arc_anon, hdr, hash_lock); | |
2357 | if (HDR_IN_HASH_TABLE(hdr)) | |
2358 | buf_hash_remove(hdr); | |
2359 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
2360 | } |
2361 | ||
2362 | /* | |
2363 | * Broadcast before we drop the hash_lock to avoid the possibility | |
2364 | * that the hdr (and hence the cv) might be freed before we get to | |
2365 | * the cv_broadcast(). | |
2366 | */ | |
2367 | cv_broadcast(&hdr->b_cv); | |
2368 | ||
2369 | if (hash_lock) { | |
2370 | /* | |
2371 | * Only call arc_access on anonymous buffers. This is because | |
2372 | * if we've issued an I/O for an evicted buffer, we've already | |
2373 | * called arc_access (to prevent any simultaneous readers from | |
2374 | * getting confused). | |
2375 | */ | |
2376 | if (zio->io_error == 0 && hdr->b_state == arc_anon) | |
2377 | arc_access(hdr, hash_lock); | |
2378 | mutex_exit(hash_lock); | |
2379 | } else { | |
2380 | /* | |
2381 | * This block was freed while we waited for the read to | |
2382 | * complete. It has been removed from the hash table and | |
2383 | * moved to the anonymous state (so that it won't show up | |
2384 | * in the cache). | |
2385 | */ | |
2386 | ASSERT3P(hdr->b_state, ==, arc_anon); | |
2387 | freeable = refcount_is_zero(&hdr->b_refcnt); | |
2388 | } | |
2389 | ||
2390 | /* execute each callback and free its structure */ | |
2391 | while ((acb = callback_list) != NULL) { | |
2392 | if (acb->acb_done) | |
2393 | acb->acb_done(zio, acb->acb_buf, acb->acb_private); | |
2394 | ||
2395 | if (acb->acb_zio_dummy != NULL) { | |
2396 | acb->acb_zio_dummy->io_error = zio->io_error; | |
2397 | zio_nowait(acb->acb_zio_dummy); | |
2398 | } | |
2399 | ||
2400 | callback_list = acb->acb_next; | |
2401 | kmem_free(acb, sizeof (arc_callback_t)); | |
2402 | } | |
2403 | ||
2404 | if (freeable) | |
2405 | arc_hdr_destroy(hdr); | |
2406 | } | |
2407 | ||
2408 | /* | |
2409 | * "Read" the block block at the specified DVA (in bp) via the | |
2410 | * cache. If the block is found in the cache, invoke the provided | |
2411 | * callback immediately and return. Note that the `zio' parameter | |
2412 | * in the callback will be NULL in this case, since no IO was | |
2413 | * required. If the block is not in the cache pass the read request | |
2414 | * on to the spa with a substitute callback function, so that the | |
2415 | * requested block will be added to the cache. | |
2416 | * | |
2417 | * If a read request arrives for a block that has a read in-progress, | |
2418 | * either wait for the in-progress read to complete (and return the | |
2419 | * results); or, if this is a read with a "done" func, add a record | |
2420 | * to the read to invoke the "done" func when the read completes, | |
2421 | * and return; or just return. | |
2422 | * | |
2423 | * arc_read_done() will invoke all the requested "done" functions | |
2424 | * for readers of this block. | |
2425 | * |
2426 | * Normal callers should use arc_read and pass the arc buffer and offset | |
2427 | * for the bp. But if you know you don't need locking, you can use | |
2428 | * arc_read_bp. | |
2429 | */ |
2430 | int | |
2431 | arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, |
2432 | arc_done_func_t *done, void *private, int priority, int zio_flags, | |
2433 | uint32_t *arc_flags, const zbookmark_t *zb) | |
2434 | { | |
2435 | int err; | |
2436 | arc_buf_hdr_t *hdr = pbuf->b_hdr; | |
2437 | ||
2438 | ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); | |
2439 | ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); | |
2440 | rw_enter(&pbuf->b_lock, RW_READER); | |
2441 | ||
2442 | err = arc_read_nolock(pio, spa, bp, done, private, priority, | |
2443 | zio_flags, arc_flags, zb); | |
2444 | ||
2445 | ASSERT3P(hdr, ==, pbuf->b_hdr); | |
2446 | rw_exit(&pbuf->b_lock); | |
2447 | return (err); | |
2448 | } | |
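A hypothetical caller of the locked variant (example_read_child and its parameters are invented): bp points into the parent arc buffer's data, so arc_read() holds pbuf->b_lock across the lookup to keep bp stable.

static int
example_read_child(zio_t *pio, spa_t *spa, arc_buf_t *pbuf, int child,
    arc_done_func_t *done, void *private, const zbookmark_t *zb)
{
	/* the child block pointer lives inside the parent's data */
	blkptr_t *bp = (blkptr_t *)pbuf->b_data + child;
	uint32_t flags = ARC_NOWAIT;

	return (arc_read(pio, spa, bp, pbuf, done, private,
	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb));
}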
2449 | ||
2450 | int | |
2451 | arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, | |
2452 | arc_done_func_t *done, void *private, int priority, int zio_flags, | |
2453 | uint32_t *arc_flags, const zbookmark_t *zb) | |
2454 | { |
2455 | arc_buf_hdr_t *hdr; | |
2456 | arc_buf_t *buf; | |
2457 | kmutex_t *hash_lock; | |
2458 | zio_t *rzio; | |
2459 | ||
2460 | top: | |
2461 | hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); | |
2462 | if (hdr && hdr->b_datacnt > 0) { | |
2463 | ||
2464 | *arc_flags |= ARC_CACHED; | |
2465 | ||
2466 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
2467 | ||
2468 | if (*arc_flags & ARC_WAIT) { | |
2469 | cv_wait(&hdr->b_cv, hash_lock); | |
2470 | mutex_exit(hash_lock); | |
2471 | goto top; | |
2472 | } | |
2473 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2474 | ||
2475 | if (done) { | |
2476 | arc_callback_t *acb = NULL; | |
2477 | ||
2478 | acb = kmem_zalloc(sizeof (arc_callback_t), | |
2479 | KM_SLEEP); | |
2480 | acb->acb_done = done; | |
2481 | acb->acb_private = private; | |
2482 | if (pio != NULL) |
2483 | acb->acb_zio_dummy = zio_null(pio, | |
2484 | spa, NULL, NULL, zio_flags);
2485 | |
2486 | ASSERT(acb->acb_done != NULL); | |
2487 | acb->acb_next = hdr->b_acb; | |
2488 | hdr->b_acb = acb; | |
2489 | add_reference(hdr, hash_lock, private); | |
2490 | mutex_exit(hash_lock); | |
2491 | return (0); | |
2492 | } | |
2493 | mutex_exit(hash_lock); | |
2494 | return (0); | |
2495 | } | |
2496 | ||
2497 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2498 | ||
2499 | if (done) { | |
2500 | add_reference(hdr, hash_lock, private); | |
2501 | /* | |
2502 | * If this block is already in use, create a new | |
2503 | * copy of the data so that we will be guaranteed | |
2504 | * that arc_release() will always succeed. | |
2505 | */ | |
2506 | buf = hdr->b_buf; | |
2507 | ASSERT(buf); | |
2508 | ASSERT(buf->b_data); | |
2509 | if (HDR_BUF_AVAILABLE(hdr)) { | |
2510 | ASSERT(buf->b_efunc == NULL); | |
2511 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2512 | } else { | |
2513 | buf = arc_buf_clone(buf); | |
2514 | } | |
2515 | } else if (*arc_flags & ARC_PREFETCH && | |
2516 | refcount_count(&hdr->b_refcnt) == 0) { | |
2517 | hdr->b_flags |= ARC_PREFETCH; | |
2518 | } | |
2519 | DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); | |
2520 | arc_access(hdr, hash_lock); | |
2521 | if (*arc_flags & ARC_L2CACHE) |
2522 | hdr->b_flags |= ARC_L2CACHE; | |
2523 | mutex_exit(hash_lock); |
2524 | ARCSTAT_BUMP(arcstat_hits); | |
2525 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2526 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2527 | data, metadata, hits); | |
2528 | ||
2529 | if (done) | |
2530 | done(NULL, buf, private); | |
2531 | } else { | |
2532 | uint64_t size = BP_GET_LSIZE(bp); | |
2533 | arc_callback_t *acb; | |
2534 | vdev_t *vd = NULL; |
2535 | daddr_t addr; | |
2536 | |
2537 | if (hdr == NULL) { | |
2538 | /* this block is not in the cache */ | |
2539 | arc_buf_hdr_t *exists; | |
2540 | arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); | |
2541 | buf = arc_buf_alloc(spa, size, private, type); | |
2542 | hdr = buf->b_hdr; | |
2543 | hdr->b_dva = *BP_IDENTITY(bp); | |
2544 | hdr->b_birth = bp->blk_birth; | |
2545 | hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; | |
2546 | exists = buf_hash_insert(hdr, &hash_lock); | |
2547 | if (exists) { | |
2548 | /* somebody beat us to the hash insert */ | |
2549 | mutex_exit(hash_lock); | |
2550 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
2551 | hdr->b_birth = 0; | |
2552 | hdr->b_cksum0 = 0; | |
2553 | (void) arc_buf_remove_ref(buf, private); | |
2554 | goto top; /* restart the IO request */ | |
2555 | } | |
2556 | /* if this is a prefetch, we don't have a reference */ | |
2557 | if (*arc_flags & ARC_PREFETCH) { | |
2558 | (void) remove_reference(hdr, hash_lock, | |
2559 | private); | |
2560 | hdr->b_flags |= ARC_PREFETCH; | |
2561 | } | |
2562 | if (*arc_flags & ARC_L2CACHE) |
2563 | hdr->b_flags |= ARC_L2CACHE; | |
2564 | if (BP_GET_LEVEL(bp) > 0) |
2565 | hdr->b_flags |= ARC_INDIRECT; | |
2566 | } else { | |
2567 | /* this block is in the ghost cache */ | |
2568 | ASSERT(GHOST_STATE(hdr->b_state)); | |
2569 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2570 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); | |
2571 | ASSERT(hdr->b_buf == NULL); | |
2572 | ||
2573 | /* if this is a prefetch, we don't have a reference */ | |
2574 | if (*arc_flags & ARC_PREFETCH) | |
2575 | hdr->b_flags |= ARC_PREFETCH; | |
2576 | else | |
2577 | add_reference(hdr, hash_lock, private); | |
2578 | if (*arc_flags & ARC_L2CACHE) |
2579 | hdr->b_flags |= ARC_L2CACHE; | |
2580 | buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); |
2581 | buf->b_hdr = hdr; | |
2582 | buf->b_data = NULL; | |
2583 | buf->b_efunc = NULL; | |
2584 | buf->b_private = NULL; | |
2585 | buf->b_next = NULL; | |
2586 | hdr->b_buf = buf; | |
2587 | arc_get_data_buf(buf); | |
2588 | ASSERT(hdr->b_datacnt == 0); | |
2589 | hdr->b_datacnt = 1; | |
2590 | ||
2591 | } | |
2592 | ||
2593 | acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); | |
2594 | acb->acb_done = done; | |
2595 | acb->acb_private = private; | |
2596 | |
2597 | ASSERT(hdr->b_acb == NULL); | |
2598 | hdr->b_acb = acb; | |
2599 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
2600 | ||
2601 | /* | |
2602 | * If the buffer has been evicted, migrate it to a present state | |
2603 | * before issuing the I/O. Once we drop the hash-table lock, | |
2604 | * the header will be marked as I/O in progress and have an | |
2605 | * attached buffer. At this point, anybody who finds this | |
2606 | * buffer ought to notice that it's legit but has a pending I/O. | |
2607 | */ | |
2608 | ||
2609 | if (GHOST_STATE(hdr->b_state)) | |
2610 | arc_access(hdr, hash_lock); | |
2611 | ||
2612 | if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && |
2613 | (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { | |
2614 | addr = hdr->b_l2hdr->b_daddr; | |
2615 | /* | |
2616 | * Lock out device removal. | |
2617 | */ | |
2618 | if (vdev_is_dead(vd) || | |
2619 | !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) | |
2620 | vd = NULL; | |
2621 | } | |
2622 | ||
2623 | mutex_exit(hash_lock); | |
2624 | ||
2625 | ASSERT3U(hdr->b_size, ==, size); |
2626 | DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, | |
2627 | zbookmark_t *, zb); | |
2628 | ARCSTAT_BUMP(arcstat_misses); | |
2629 | ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), | |
2630 | demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, | |
2631 | data, metadata, misses); | |
2632 | ||
2633 | if (vd != NULL) {
2634 | /* |
2635 | * Read from the L2ARC if the following are true: | |
2636 | * 1. The L2ARC vdev was previously cached. |
2637 | * 2. This buffer still has L2ARC metadata. | |
2638 | * 3. This buffer isn't currently writing to the L2ARC. | |
2639 | * 4. The L2ARC entry wasn't evicted, which may | |
2640 | * also have invalidated the vdev. | |
2641 | */
2642 | if (hdr->b_l2hdr != NULL && |
2643 | !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) { | |
2644 | l2arc_read_callback_t *cb; |
2645 | ||
2646 | DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); | |
2647 | ARCSTAT_BUMP(arcstat_l2_hits); | |
2648 | ||
2649 | cb = kmem_zalloc(sizeof (l2arc_read_callback_t), |
2650 | KM_SLEEP); | |
2651 | cb->l2rcb_buf = buf; | |
2652 | cb->l2rcb_spa = spa; | |
2653 | cb->l2rcb_bp = *bp; | |
2654 | cb->l2rcb_zb = *zb; | |
2655 | cb->l2rcb_flags = zio_flags;
2656 | |
2657 | /* | |
2658 | * l2arc read. The SCL_L2ARC lock will be |
2659 | * released by l2arc_read_done(). | |
2660 | */ |
2661 | rzio = zio_read_phys(pio, vd, addr, size, | |
2662 | buf->b_data, ZIO_CHECKSUM_OFF, | |
2663 | l2arc_read_done, cb, priority, zio_flags | |
2664 | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | | |
2665 | ZIO_FLAG_DONT_PROPAGATE | | |
2666 | ZIO_FLAG_DONT_RETRY, B_FALSE); | |
2667 | DTRACE_PROBE2(l2arc__read, vdev_t *, vd, |
2668 | zio_t *, rzio); | |
2669 | ||
2670 | if (*arc_flags & ARC_NOWAIT) { |
2671 | zio_nowait(rzio); | |
2672 | return (0); | |
2673 | } | |
2674 |
2675 | ASSERT(*arc_flags & ARC_WAIT); |
2676 | if (zio_wait(rzio) == 0) | |
2677 | return (0); | |
2678 | ||
2679 | /* l2arc read error; goto zio_read() */ | |
2680 | } else { |
2681 | DTRACE_PROBE1(l2arc__miss, | |
2682 | arc_buf_hdr_t *, hdr); | |
2683 | ARCSTAT_BUMP(arcstat_l2_misses); | |
2684 | if (HDR_L2_WRITING(hdr)) | |
2685 | ARCSTAT_BUMP(arcstat_l2_rw_clash); | |
2686 | spa_config_exit(spa, SCL_L2ARC, vd);
2687 | } |
2688 | } | |
2689 | |
2690 | rzio = zio_read(pio, spa, bp, buf->b_data, size, | |
2691 | arc_read_done, buf, priority, zio_flags, zb);
2692 | |
2693 | if (*arc_flags & ARC_WAIT) | |
2694 | return (zio_wait(rzio)); | |
2695 | ||
2696 | ASSERT(*arc_flags & ARC_NOWAIT); | |
2697 | zio_nowait(rzio); | |
2698 | } | |
2699 | return (0); | |
2700 | } | |
2701 | ||
2702 | /* | |
2703 | * arc_read() variant to support pool traversal. If the block is already | |
2704 | * in the ARC, make a copy of it; otherwise, the caller will do the I/O. | |
2705 | * The idea is that we don't want pool traversal filling up memory, but | |
2706 | * if the ARC already has the data anyway, we shouldn't pay for the I/O. | |
2707 | */ | |
2708 | int | |
2709 | arc_tryread(spa_t *spa, blkptr_t *bp, void *data) | |
2710 | { | |
2711 | arc_buf_hdr_t *hdr; | |
2712 | kmutex_t *hash_mtx; | |
2713 | int rc = 0; | |
2714 | ||
2715 | hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); | |
2716 | ||
2717 | if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { | |
2718 | arc_buf_t *buf = hdr->b_buf; | |
2719 | ||
2720 | ASSERT(buf); | |
2721 | while (buf->b_data == NULL) { | |
2722 | buf = buf->b_next; | |
2723 | ASSERT(buf); | |
2724 | } | |
2725 | bcopy(buf->b_data, data, hdr->b_size); | |
2726 | } else { | |
2727 | rc = ENOENT; | |
2728 | } | |
2729 | ||
2730 | if (hash_mtx) | |
2731 | mutex_exit(hash_mtx); | |
2732 | ||
2733 | return (rc); | |
2734 | } | |
2735 | ||
2736 | void | |
2737 | arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) | |
2738 | { | |
2739 | ASSERT(buf->b_hdr != NULL); | |
2740 | ASSERT(buf->b_hdr->b_state != arc_anon); | |
2741 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); | |
2742 | buf->b_efunc = func; | |
2743 | buf->b_private = private; | |
2744 | } | |
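A sketch of the eviction-callback protocol (example_owner_t and example_owner_evict are invented; the DMU's dbuf layer follows the same pattern): the owner registers an arc_evict_func_t so it can forget its pointer before the ARC reaps the buffer.

typedef struct example_owner {
	arc_buf_t *eo_buf;		/* the owner's cached reference */
} example_owner_t;

static int
example_owner_evict(arc_buf_t *buf)	/* an arc_evict_func_t */
{
	example_owner_t *eo = buf->b_private;

	eo->eo_buf = NULL;		/* the buffer is going away */
	return (0);			/* zero tells the ARC we succeeded */
}

Registration happens once the owner holds a reference:

	arc_set_callback(buf, example_owner_evict, eo);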
2745 | ||
2746 | /* | |
2747 | * This is used by the DMU to let the ARC know that a buffer is | |
2748 | * being evicted, so the ARC should clean up. If this arc buf | |
2749 | * is not yet in the evicted state, it will be put there. | |
2750 | */ | |
2751 | int | |
2752 | arc_buf_evict(arc_buf_t *buf) | |
2753 | { | |
2754 | arc_buf_hdr_t *hdr; | |
2755 | kmutex_t *hash_lock; | |
2756 | arc_buf_t **bufp; | |
2757 | ||
2758 | rw_enter(&buf->b_lock, RW_WRITER);
2759 | hdr = buf->b_hdr; |
2760 | if (hdr == NULL) { | |
2761 | /* | |
2762 | * We are in arc_do_user_evicts(). | |
2763 | */ | |
2764 | ASSERT(buf->b_data == NULL); | |
2765 | rw_exit(&buf->b_lock);
2766 | return (0);
2767 | } else if (buf->b_data == NULL) { |
2768 | arc_buf_t copy = *buf; /* structure assignment */ | |
2769 | /*
2770 | * We are on the eviction list; process this buffer now |
2771 | * but let arc_do_user_evicts() do the reaping. | |
2772 | */
2773 | buf->b_efunc = NULL; |
2774 | rw_exit(&buf->b_lock); | |
2775 | VERIFY(copy.b_efunc(&copy) == 0);
2776 | return (1); | |
2777 | }
2778 | hash_lock = HDR_LOCK(hdr); |
2779 | mutex_enter(hash_lock); | |
2780 | |
2781 | ASSERT(buf->b_hdr == hdr); | |
2782 | ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); | |
2783 | ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); | |
2784 | ||
2785 | /* | |
2786 | * Pull this buffer off of the hdr | |
2787 | */ | |
2788 | bufp = &hdr->b_buf; | |
2789 | while (*bufp != buf) | |
2790 | bufp = &(*bufp)->b_next; | |
2791 | *bufp = buf->b_next; | |
2792 | ||
2793 | ASSERT(buf->b_data != NULL); | |
2794 | arc_buf_destroy(buf, FALSE, FALSE); | |
2795 | ||
2796 | if (hdr->b_datacnt == 0) { | |
2797 | arc_state_t *old_state = hdr->b_state; | |
2798 | arc_state_t *evicted_state; | |
2799 | ||
2800 | ASSERT(refcount_is_zero(&hdr->b_refcnt)); | |
2801 | ||
2802 | evicted_state = | |
2803 | (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; | |
2804 | ||
2805 | mutex_enter(&old_state->arcs_mtx); | |
2806 | mutex_enter(&evicted_state->arcs_mtx); | |
2807 | ||
2808 | arc_change_state(evicted_state, hdr, hash_lock); | |
2809 | ASSERT(HDR_IN_HASH_TABLE(hdr)); | |
2810 | hdr->b_flags |= ARC_IN_HASH_TABLE; | |
2811 | hdr->b_flags &= ~ARC_BUF_AVAILABLE; | |
2812 | ||
2813 | mutex_exit(&evicted_state->arcs_mtx); | |
2814 | mutex_exit(&old_state->arcs_mtx); | |
2815 | } | |
2816 | mutex_exit(hash_lock); | |
2817 | rw_exit(&buf->b_lock);
2818 | |
2819 | VERIFY(buf->b_efunc(buf) == 0); | |
2820 | buf->b_efunc = NULL; | |
2821 | buf->b_private = NULL; | |
2822 | buf->b_hdr = NULL; | |
2823 | kmem_cache_free(buf_cache, buf); | |
2824 | return (1); | |
2825 | } | |
2826 | ||
2827 | /* | |
2828 | * Release this buffer from the cache. This must be done | |
2829 | * after a read and prior to modifying the buffer contents. | |
2830 | * If the buffer has more than one reference, we must make | |
2831 | * a new hdr for the buffer.
2832 | */ |
2833 | void | |
2834 | arc_release(arc_buf_t *buf, void *tag) | |
2835 | { | |
2836 | arc_buf_hdr_t *hdr; |
2837 | kmutex_t *hash_lock; | |
2838 | l2arc_buf_hdr_t *l2hdr; | |
2839 | uint64_t buf_size; |
2840 | ||
2841 | rw_enter(&buf->b_lock, RW_WRITER); |
2842 | hdr = buf->b_hdr; | |
2843 | ||
2844 | /* this buffer is not on any list */ |
2845 | ASSERT(refcount_count(&hdr->b_refcnt) > 0); | |
2846 | ASSERT(!(hdr->b_flags & ARC_STORED));
2847 | |
2848 | if (hdr->b_state == arc_anon) { | |
2849 | /* this buffer is already released */ | |
2850 | ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); | |
2851 | ASSERT(BUF_EMPTY(hdr)); | |
2852 | ASSERT(buf->b_efunc == NULL); | |
2853 | arc_buf_thaw(buf); | |
2854 | rw_exit(&buf->b_lock);
2855 | return; |
2856 | } | |
2857 | ||
2858 | hash_lock = HDR_LOCK(hdr);
2859 | mutex_enter(hash_lock); |
2860 | ||
2861 | l2hdr = hdr->b_l2hdr; |
2862 | if (l2hdr) { | |
2863 | mutex_enter(&l2arc_buflist_mtx); | |
2864 | hdr->b_l2hdr = NULL; | |
2865 | buf_size = hdr->b_size; | |
2866 | } | |
2867 | ||
2868 | /* |
2869 | * Do we have more than one buf? | |
2870 | */ | |
2871 | if (hdr->b_datacnt > 1) {
2872 | arc_buf_hdr_t *nhdr; |
2873 | arc_buf_t **bufp; | |
2874 | uint64_t blksz = hdr->b_size; | |
2875 | spa_t *spa = hdr->b_spa; | |
2876 | arc_buf_contents_t type = hdr->b_type; | |
2877 | uint32_t flags = hdr->b_flags; | |
2878 | ||
2879 | ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
2880 | /* |
2881 | * Pull the data off of this buf and attach it to | |
2882 | * a new anonymous buf. | |
2883 | */ | |
2884 | (void) remove_reference(hdr, hash_lock, tag); | |
2885 | bufp = &hdr->b_buf; | |
2886 | while (*bufp != buf) | |
2887 | bufp = &(*bufp)->b_next; | |
2888 | *bufp = (*bufp)->b_next; | |
2889 | buf->b_next = NULL; | |
2890 | ||
2891 | ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); | |
2892 | atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); | |
2893 | if (refcount_is_zero(&hdr->b_refcnt)) { | |
2894 | uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; | |
2895 | ASSERT3U(*size, >=, hdr->b_size); | |
2896 | atomic_add_64(size, -hdr->b_size); | |
2897 | } | |
2898 | hdr->b_datacnt -= 1; | |
2899 | arc_cksum_verify(buf); |
2900 | ||
2901 | mutex_exit(hash_lock); | |
2902 | ||
2903 | nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
2904 | nhdr->b_size = blksz; | |
2905 | nhdr->b_spa = spa; | |
2906 | nhdr->b_type = type; | |
2907 | nhdr->b_buf = buf; | |
2908 | nhdr->b_state = arc_anon; | |
2909 | nhdr->b_arc_access = 0; | |
2910 | nhdr->b_flags = flags & ARC_L2_WRITING; | |
2911 | nhdr->b_l2hdr = NULL; | |
2912 | nhdr->b_datacnt = 1; | |
2913 | nhdr->b_freeze_cksum = NULL; | |
2914 | (void) refcount_add(&nhdr->b_refcnt, tag); | |
2915 | buf->b_hdr = nhdr; | |
2916 | rw_exit(&buf->b_lock);
2917 | atomic_add_64(&arc_anon->arcs_size, blksz); |
2918 | } else { | |
2919 | rw_exit(&buf->b_lock);
2920 | ASSERT(refcount_count(&hdr->b_refcnt) == 1); |
2921 | ASSERT(!list_link_active(&hdr->b_arc_node)); | |
2922 | ASSERT(!HDR_IO_IN_PROGRESS(hdr)); | |
2923 | arc_change_state(arc_anon, hdr, hash_lock); | |
2924 | hdr->b_arc_access = 0; | |
2925 | mutex_exit(hash_lock); |
2926 | ||
2927 | bzero(&hdr->b_dva, sizeof (dva_t)); | |
2928 | hdr->b_birth = 0; | |
2929 | hdr->b_cksum0 = 0; | |
2930 | arc_buf_thaw(buf); | |
2931 | } | |
2932 | buf->b_efunc = NULL; | |
2933 | buf->b_private = NULL; | |
2934 | ||
2935 | if (l2hdr) { | |
2936 | list_remove(l2hdr->b_dev->l2ad_buflist, hdr); | |
2937 | kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); | |
2938 | ARCSTAT_INCR(arcstat_l2_size, -buf_size); | |
2939 | mutex_exit(&l2arc_buflist_mtx);
2940 | }
2941 | } |
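A minimal sketch of the intended read-modify-write usage (example_modify is invented, and `tag' must be the caller's existing hold on the buffer): release first, and only then dirty b_data.

static void
example_modify(arc_buf_t *buf, uint64_t off, const void *src,
    uint64_t len, void *tag)
{
	arc_release(buf, tag);		/* now anonymous and writable */
	ASSERT(arc_released(buf));
	bcopy(src, (char *)buf->b_data + off, len);
	/* the dirty anonymous buffer is later pushed out via arc_write() */
}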
2942 | ||
2943 | int | |
2944 | arc_released(arc_buf_t *buf) | |
2945 | { | |
2946 | int released; |
2947 | ||
2948 | rw_enter(&buf->b_lock, RW_READER); | |
2949 | released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); | |
2950 | rw_exit(&buf->b_lock); | |
2951 | return (released); | |
2952 | } |
2953 | ||
2954 | int | |
2955 | arc_has_callback(arc_buf_t *buf) | |
2956 | { | |
2957 | int callback; |
2958 | ||
2959 | rw_enter(&buf->b_lock, RW_READER); | |
2960 | callback = (buf->b_efunc != NULL); | |
2961 | rw_exit(&buf->b_lock); | |
2962 | return (callback); | |
2963 | } |
2964 | ||
2965 | #ifdef ZFS_DEBUG | |
2966 | int | |
2967 | arc_referenced(arc_buf_t *buf) | |
2968 | { | |
2969 | int referenced; |
2970 | ||
2971 | rw_enter(&buf->b_lock, RW_READER); | |
2972 | referenced = (refcount_count(&buf->b_hdr->b_refcnt)); | |
2973 | rw_exit(&buf->b_lock); | |
2974 | return (referenced); | |
2975 | } |
2976 | #endif | |
2977 | ||
2978 | static void | |
2979 | arc_write_ready(zio_t *zio) | |
2980 | { | |
2981 | arc_write_callback_t *callback = zio->io_private; | |
2982 | arc_buf_t *buf = callback->awcb_buf; | |
2983 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
2984 | ||
2985 | ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); |
2986 | callback->awcb_ready(zio, buf, callback->awcb_private); | |
2987 | ||
2988 | /* |
2989 | * If the IO is already in progress, then this is a re-write | |
2990 | * attempt, so we need to thaw and re-compute the cksum. |
2991 | * It is the responsibility of the callback to handle the | |
2992 | * accounting for any re-write attempt. | |
2993 | */ |
2994 | if (HDR_IO_IN_PROGRESS(hdr)) { | |
2995 | mutex_enter(&hdr->b_freeze_lock); |
2996 | if (hdr->b_freeze_cksum != NULL) { | |
2997 | kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); | |
2998 | hdr->b_freeze_cksum = NULL; | |
2999 | } | |
3000 | mutex_exit(&hdr->b_freeze_lock); | |
3001 | } | |
3002 | arc_cksum_compute(buf, B_FALSE); | |
3003 | hdr->b_flags |= ARC_IO_IN_PROGRESS; | |
3004 | } | |
3005 | ||
3006 | static void | |
3007 | arc_write_done(zio_t *zio) | |
3008 | { | |
3009 | arc_write_callback_t *callback = zio->io_private; | |
3010 | arc_buf_t *buf = callback->awcb_buf; | |
3011 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3012 | ||
3013 | hdr->b_acb = NULL; | |
3014 | ||
3015 | hdr->b_dva = *BP_IDENTITY(zio->io_bp); |
3016 | hdr->b_birth = zio->io_bp->blk_birth; | |
3017 | hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; | |
3018 | /* | |
3019 | * If the block to be written was all-zero, we may have | |
3020 | * compressed it away. In this case no write was performed | |
3021 | * so there will be no dva/birth-date/checksum. The buffer | |
3022 | * must therefore remain anonymous (and uncached).
3023 | */ | |
3024 | if (!BUF_EMPTY(hdr)) { | |
3025 | arc_buf_hdr_t *exists; | |
3026 | kmutex_t *hash_lock; | |
3027 | ||
3028 | arc_cksum_verify(buf); | |
3029 | ||
3030 | exists = buf_hash_insert(hdr, &hash_lock); | |
3031 | if (exists) { | |
3032 | /* | |
3033 | * This can only happen if we overwrite for | |
3034 | * sync-to-convergence, because we remove | |
3035 | * buffers from the hash table when we arc_free(). | |
3036 | */ | |
3037 | ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE);
3038 | ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), |
3039 | BP_IDENTITY(zio->io_bp))); | |
3040 | ASSERT3U(zio->io_bp_orig.blk_birth, ==, | |
3041 | zio->io_bp->blk_birth); | |
3042 | ||
3043 | ASSERT(refcount_is_zero(&exists->b_refcnt)); | |
3044 | arc_change_state(arc_anon, exists, hash_lock); | |
3045 | mutex_exit(hash_lock); | |
3046 | arc_hdr_destroy(exists); | |
3047 | exists = buf_hash_insert(hdr, &hash_lock); | |
3048 | ASSERT3P(exists, ==, NULL); | |
3049 | } | |
3050 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
3051 | /* if it's not anon, we are doing a scrub */ |
3052 | if (hdr->b_state == arc_anon) | |
3053 | arc_access(hdr, hash_lock); | |
3054 | mutex_exit(hash_lock); |
3055 | } else if (callback->awcb_done == NULL) { | |
3056 | int destroy_hdr; | |
3057 | /* | |
3058 | * This is an anonymous buffer with no user callback, | |
3059 | * destroy it if there are no active references. | |
3060 | */ | |
3061 | mutex_enter(&arc_eviction_mtx); | |
3062 | destroy_hdr = refcount_is_zero(&hdr->b_refcnt); | |
3063 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
3064 | mutex_exit(&arc_eviction_mtx); | |
3065 | if (destroy_hdr) | |
3066 | arc_hdr_destroy(hdr); | |
3067 | } else { | |
3068 | hdr->b_flags &= ~ARC_IO_IN_PROGRESS; | |
3069 | } | |
3070 | hdr->b_flags &= ~ARC_STORED;
3071 | |
3072 | if (callback->awcb_done) { | |
3073 | ASSERT(!refcount_is_zero(&hdr->b_refcnt)); | |
3074 | callback->awcb_done(zio, buf, callback->awcb_private); | |
3075 | } | |
3076 | ||
3077 | kmem_free(callback, sizeof (arc_write_callback_t)); | |
3078 | } | |
3079 | ||
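/*
 * A simplified sketch of the "compressed away" test used above: an
 * all-zero block never receives a dva/birth identity, so its header
 * still looks freshly allocated. BUF_EMPTY() in this file checks the
 * same fields; the type below is an illustrative stand-in, not the
 * kernel definition.
 */
#include <stdint.h>

struct blk_identity {
	uint64_t dva_word[2];	/* device/offset identity */
	uint64_t birth;		/* txg in which the block was born */
};

/* Nonzero when no physical write happened; the buffer must stay anon. */
static int
blk_identity_empty(const struct blk_identity *id)
{
	return (id->dva_word[0] == 0 && id->dva_word[1] == 0 &&
	    id->birth == 0);
}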
3080 | void |
3081 | write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) | |
3082 | { | |
3083 | boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); | |
3084 | ||
3085 | /* Determine checksum setting */ | |
3086 | if (ismd) { | |
3087 | /* | |
3088 | * Metadata always gets checksummed. If the data | |
3089 | * checksum is multi-bit correctable, and it's not a | |
3090 | * ZBT-style checksum, then it's suitable for metadata | |
3091 | * as well. Otherwise, the metadata checksum defaults | |
3092 | * to fletcher4. | |
3093 | */ | |
3094 | if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && | |
3095 | !zio_checksum_table[wp->wp_oschecksum].ci_zbt) | |
3096 | zp->zp_checksum = wp->wp_oschecksum; | |
3097 | else | |
3098 | zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; | |
3099 | } else { | |
3100 | zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, | |
3101 | wp->wp_oschecksum); | |
3102 | } | |
3103 | ||
3104 | /* Determine compression setting */ | |
3105 | if (ismd) { | |
3106 | /* | |
3107 | * XXX -- we should design a compression algorithm | |
3108 | * that specializes in arrays of bps. | |
3109 | */ | |
3110 | zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : | |
3111 | ZIO_COMPRESS_LZJB; | |
3112 | } else { | |
3113 | zp->zp_compress = zio_compress_select(wp->wp_dncompress, | |
3114 | wp->wp_oscompress); | |
3115 | } | |
3116 | ||
3117 | zp->zp_type = wp->wp_type; | |
3118 | zp->zp_level = wp->wp_level; | |
3119 | zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); | |
3120 | } | |
3121 | ||
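/*
 * A standalone sketch of the metadata-checksum branch above. The enum
 * values and table entries are assumptions for illustration, not the
 * kernel's zio_checksum_table; only the selection logic is mirrored.
 */
#include <stdio.h>

enum sk_cksum { SK_FLETCHER_4, SK_SHA256, SK_ZBT_STYLE };

struct sk_cksum_info {
	int ci_correctable;	/* multi-bit correctable */
	int ci_zbt;		/* ZBT-style checksum */
};

static const struct sk_cksum_info sk_cksum_table[] = {
	{ 0, 0 },	/* SK_FLETCHER_4 */
	{ 1, 0 },	/* SK_SHA256 */
	{ 1, 1 },	/* SK_ZBT_STYLE */
};

/* Mirror the metadata branch of write_policy(). */
static enum sk_cksum
sk_metadata_checksum(enum sk_cksum oschecksum)
{
	if (sk_cksum_table[oschecksum].ci_correctable &&
	    !sk_cksum_table[oschecksum].ci_zbt)
		return (oschecksum);
	return (SK_FLETCHER_4);
}

int
main(void)
{
	/* SHA256 qualifies for metadata; a ZBT-style checksum does not. */
	printf("%d %d\n", sk_metadata_checksum(SK_SHA256),
	    sk_metadata_checksum(SK_ZBT_STYLE));
	return (0);
}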
3122 | zio_t *
3123 | arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp,
3124 | boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
3125 | arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
3126 | int zio_flags, const zbookmark_t *zb)
3127 | { |
3128 | arc_buf_hdr_t *hdr = buf->b_hdr; | |
3129 | arc_write_callback_t *callback; | |
3130 | zio_t *zio; |
3131 | zio_prop_t zp; | |
34dc7c2f | 3132 | |
3133 | ASSERT(ready != NULL);
3134 | ASSERT(!HDR_IO_ERROR(hdr)); |
3135 | ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); | |
3136 | ASSERT(hdr->b_acb == 0); | |
3137 | if (l2arc)
3138 | hdr->b_flags |= ARC_L2CACHE;
3139 | callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); |
3140 | callback->awcb_ready = ready; | |
3141 | callback->awcb_done = done; | |
3142 | callback->awcb_private = private; | |
3143 | callback->awcb_buf = buf; | |
3144 | |
3145 | write_policy(spa, wp, &zp); | |
3146 | zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, | |
3147 | arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); | |
3148 | |
3149 | return (zio); | |
3150 | } | |
3151 | ||
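/*
 * A hypothetical arc_write() call site, showing what a caller supplies:
 * a parent zio, write properties, the buffer, and the ready/done
 * callbacks. my_ready_cb, my_done_cb and cb_arg are made-up names for
 * illustration; the priority and flag are values used elsewhere in
 * this file.
 */
	zio = arc_write(pio, spa, &wp, B_TRUE /* l2arc */, txg, bp, buf,
	    my_ready_cb, my_done_cb, cb_arg, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_CANFAIL, &zb);
	(void) zio_wait(zio);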
3152 | int | |
3153 | arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, | |
3154 | zio_done_func_t *done, void *private, uint32_t arc_flags) | |
3155 | { | |
3156 | arc_buf_hdr_t *ab; | |
3157 | kmutex_t *hash_lock; | |
3158 | zio_t *zio; | |
3159 | ||
3160 | /* | |
3161 | * If this buffer is in the cache, release it, so it | |
3162 | * can be re-used. | |
3163 | */ | |
3164 | ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); | |
3165 | if (ab != NULL) { | |
3166 | /* | |
3167 | * The checksum of blocks to free is not always | |
3168 | * preserved (eg. on the deadlist). However, if it is | |
3169 | * nonzero, it should match what we have in the cache. | |
3170 | */ | |
3171 | ASSERT(bp->blk_cksum.zc_word[0] == 0 || | |
3172 | bp->blk_cksum.zc_word[0] == ab->b_cksum0 || |
3173 | bp->blk_fill == BLK_FILL_ALREADY_FREED); | |
3174 | ||
3175 | if (ab->b_state != arc_anon) |
3176 | arc_change_state(arc_anon, ab, hash_lock); | |
3177 | if (HDR_IO_IN_PROGRESS(ab)) { | |
3178 | /* | |
3179 | * This should only happen when we prefetch. | |
3180 | */ | |
3181 | ASSERT(ab->b_flags & ARC_PREFETCH); | |
3182 | ASSERT3U(ab->b_datacnt, ==, 1); | |
3183 | ab->b_flags |= ARC_FREED_IN_READ; | |
3184 | if (HDR_IN_HASH_TABLE(ab)) | |
3185 | buf_hash_remove(ab); | |
3186 | ab->b_arc_access = 0; | |
3187 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3188 | ab->b_birth = 0; | |
3189 | ab->b_cksum0 = 0; | |
3190 | ab->b_buf->b_efunc = NULL; | |
3191 | ab->b_buf->b_private = NULL; | |
3192 | mutex_exit(hash_lock); | |
3193 | } else if (refcount_is_zero(&ab->b_refcnt)) { | |
3194 | ab->b_flags |= ARC_FREE_IN_PROGRESS; | |
3195 | mutex_exit(hash_lock); | |
3196 | arc_hdr_destroy(ab); | |
3197 | ARCSTAT_BUMP(arcstat_deleted); | |
3198 | } else { | |
3199 | /* | |
3200 | * We still have an active reference on this | |
3201 | * buffer. This can happen, e.g., from | |
3202 | * dbuf_unoverride(). | |
3203 | */ | |
3204 | ASSERT(!HDR_IN_HASH_TABLE(ab)); | |
3205 | ab->b_arc_access = 0; | |
3206 | bzero(&ab->b_dva, sizeof (dva_t)); | |
3207 | ab->b_birth = 0; | |
3208 | ab->b_cksum0 = 0; | |
3209 | ab->b_buf->b_efunc = NULL; | |
3210 | ab->b_buf->b_private = NULL; | |
3211 | mutex_exit(hash_lock); | |
3212 | } | |
3213 | } | |
3214 | ||
3215 | zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED);
3216 | |
3217 | if (arc_flags & ARC_WAIT) | |
3218 | return (zio_wait(zio)); | |
3219 | ||
3220 | ASSERT(arc_flags & ARC_NOWAIT); | |
3221 | zio_nowait(zio); | |
3222 | ||
3223 | return (0); | |
3224 | } | |
3225 | ||
3226 | static int | |
3227 | arc_memory_throttle(uint64_t reserve, uint64_t txg) | |
3228 | { | |
3229 | #ifdef _KERNEL | |
3230 | uint64_t inflight_data = arc_anon->arcs_size; | |
3231 | uint64_t available_memory = ptob(freemem); | |
3232 | static uint64_t page_load = 0; | |
3233 | static uint64_t last_txg = 0; | |
3234 | ||
3235 | #if defined(__i386) | |
3236 | available_memory = | |
3237 | MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); | |
3238 | #endif | |
3239 | if (available_memory >= zfs_write_limit_max) | |
3240 | return (0); | |
3241 | ||
3242 | if (txg > last_txg) { | |
3243 | last_txg = txg; | |
3244 | page_load = 0; | |
3245 | } | |
3246 | /* | |
3247 | * If we are in pageout, we know that memory is already tight, | |
3248 | * the arc is already going to be evicting, so we just want to | |
3249 | * continue to let page writes occur as quickly as possible. | |
3250 | */ | |
3251 | if (curproc == proc_pageout) { | |
3252 | if (page_load > MAX(ptob(minfree), available_memory) / 4) | |
3253 | return (ERESTART); | |
3254 | /* Note: reserve is inflated, so we deflate */ | |
3255 | page_load += reserve / 8; | |
3256 | return (0); | |
3257 | } else if (page_load > 0 && arc_reclaim_needed()) { | |
3258 | /* memory is low, delay before restarting */ | |
3259 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3260 | return (EAGAIN); | |
3261 | } | |
3262 | page_load = 0; | |
3263 | ||
3264 | if (arc_size > arc_c_min) { | |
3265 | uint64_t evictable_memory = | |
3266 | arc_mru->arcs_lsize[ARC_BUFC_DATA] + | |
3267 | arc_mru->arcs_lsize[ARC_BUFC_METADATA] + | |
3268 | arc_mfu->arcs_lsize[ARC_BUFC_DATA] + | |
3269 | arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; | |
3270 | available_memory += MIN(evictable_memory, arc_size - arc_c_min); | |
3271 | } | |
3272 | ||
3273 | if (inflight_data > available_memory / 4) { | |
3274 | ARCSTAT_INCR(arcstat_memory_throttle_count, 1); | |
3275 | return (ERESTART); | |
3276 | } | |
3277 | #endif | |
3278 | return (0); | |
3279 | } | |
3280 | ||
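/*
 * A standalone sketch of the pageout clamp above. minfree_bytes and
 * available_memory stand in for the kernel globals, and the nonzero
 * return maps to ERESTART; any numbers fed in are purely illustrative.
 */
#include <stdint.h>

#define	SK_MAX(a, b)	((a) > (b) ? (a) : (b))

static uint64_t sk_page_load;	/* deflated bytes charged this txg */

static int
sk_pageout_throttled(uint64_t reserve, uint64_t minfree_bytes,
    uint64_t available_memory)
{
	if (sk_page_load > SK_MAX(minfree_bytes, available_memory) / 4)
		return (1);		/* ERESTART in the code above */
	sk_page_load += reserve / 8;	/* reserve is inflated; deflate */
	return (0);
}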
3281 | void | |
3282 | arc_tempreserve_clear(uint64_t reserve) | |
3283 | { | |
3284 | atomic_add_64(&arc_tempreserve, -reserve); | |
3285 | ASSERT((int64_t)arc_tempreserve >= 0); | |
3286 | } | |
3287 | ||
3288 | int | |
3289 | arc_tempreserve_space(uint64_t reserve, uint64_t txg) | |
3290 | { | |
3291 | int error; | |
3292 | ||
3293 | #ifdef ZFS_DEBUG | |
3294 | /* | |
3295 | * Once in a while, fail for no reason. Everything should cope. | |
3296 | */ | |
3297 | if (spa_get_random(10000) == 0) { | |
3298 | dprintf("forcing random failure\n"); | |
3299 | return (ERESTART); | |
3300 | } | |
3301 | #endif | |
3302 | if (reserve > arc_c/4 && !arc_no_grow) | |
3303 | arc_c = MIN(arc_c_max, reserve * 4); | |
3304 | if (reserve > arc_c) | |
3305 | return (ENOMEM); | |
3306 | ||
3307 | /* | |
3308 | * Writes will, almost always, require additional memory allocations | |
3309 | * in order to compress/encrypt/etc the data. We therefore need to
3310 | * make sure that there is sufficient available memory for this. | |
3311 | */ | |
3312 | if ((error = arc_memory_throttle(reserve, txg)) != 0)
3313 | return (error); | |
3314 | ||
3315 | /* | |
3316 | * Throttle writes when the amount of dirty data in the cache | |
3317 | * gets too large. We try to keep the cache less than half full | |
3318 | * of dirty blocks so that our sync times don't grow too large. | |
3319 | * Note: if two requests come in concurrently, we might let them | |
3320 | * both succeed, when one of them should fail. Not a huge deal. | |
3321 | */ | |
3322 | if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && | |
3323 | arc_anon->arcs_size > arc_c / 4) { | |
3324 | dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " | |
3325 | "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", | |
3326 | arc_tempreserve>>10, | |
3327 | arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, | |
3328 | arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, | |
3329 | reserve>>10, arc_c>>10); | |
3330 | return (ERESTART); | |
3331 | } | |
3332 | atomic_add_64(&arc_tempreserve, reserve); | |
3333 | return (0); | |
3334 | } | |
3335 | ||
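/*
 * The expected caller discipline for the pair above, as a hypothetical
 * wrapper (not a real call site): reserve before dirtying, back off on
 * ERESTART/EAGAIN, and always clear the same byte count afterwards.
 */
static int
dirty_with_tempreserve(uint64_t nbytes, uint64_t txg)
{
	int error;

	if ((error = arc_tempreserve_space(nbytes, txg)) != 0)
		return (error);	/* ERESTART/EAGAIN: retry the tx later */
	/* ... dirty up to nbytes of anonymous ARC buffers here ... */
	arc_tempreserve_clear(nbytes);
	return (0);
}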
3336 | void | |
3337 | arc_init(void) | |
3338 | { | |
3339 | mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
3340 | cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); | |
3341 | ||
3342 | /* Convert seconds to clock ticks */ | |
3343 | arc_min_prefetch_lifespan = 1 * hz; | |
3344 | ||
3345 | /* Start out with 1/8 of all memory */ | |
3346 | arc_c = physmem * PAGESIZE / 8; | |
3347 | ||
3348 | #ifdef _KERNEL | |
3349 | /* | |
3350 | * On architectures where the physical memory can be larger | |
3351 | * than the addressable space (intel in 32-bit mode), we may | |
3352 | * need to limit the cache to 1/8 of VM size. | |
3353 | */ | |
3354 | arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); | |
3355 | #endif | |
3356 | ||
3357 | /* set min cache to 1/32 of all memory, or 64MB, whichever is more */ | |
3358 | arc_c_min = MAX(arc_c / 4, 64<<20); | |
3359 | /* set max to 3/4 of all memory, or all but 1GB, whichever is more */ | |
3360 | if (arc_c * 8 >= 1<<30) | |
3361 | arc_c_max = (arc_c * 8) - (1<<30); | |
3362 | else | |
3363 | arc_c_max = arc_c_min; | |
3364 | arc_c_max = MAX(arc_c * 6, arc_c_max); | |
3365 | ||
3366 | /* | |
3367 | * Allow the tunables to override our calculations if they are | |
3368 | * reasonable (i.e. over 64MB)
3369 | */ | |
3370 | if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) | |
3371 | arc_c_max = zfs_arc_max; | |
3372 | if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) | |
3373 | arc_c_min = zfs_arc_min; | |
3374 | ||
3375 | arc_c = arc_c_max; | |
3376 | arc_p = (arc_c >> 1); | |
3377 | ||
3378 | /* limit meta-data to 1/4 of the arc capacity */ | |
3379 | arc_meta_limit = arc_c_max / 4; | |
3380 | ||
3381 | /* Allow the tunable to override if it is reasonable */ | |
3382 | if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) | |
3383 | arc_meta_limit = zfs_arc_meta_limit; | |
3384 | ||
3385 | if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) | |
3386 | arc_c_min = arc_meta_limit / 2; | |
3387 | ||
3388 | /* if kmem_flags are set, let's try to use less memory */
3389 | if (kmem_debugging()) | |
3390 | arc_c = arc_c / 2; | |
3391 | if (arc_c < arc_c_min) | |
3392 | arc_c = arc_c_min; | |
3393 | ||
3394 | arc_anon = &ARC_anon; | |
3395 | arc_mru = &ARC_mru; | |
3396 | arc_mru_ghost = &ARC_mru_ghost; | |
3397 | arc_mfu = &ARC_mfu; | |
3398 | arc_mfu_ghost = &ARC_mfu_ghost; | |
3399 | arc_l2c_only = &ARC_l2c_only; | |
3400 | arc_size = 0; | |
3401 | ||
3402 | mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3403 | mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3404 | mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3405 | mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3406 | mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3407 | mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3408 | ||
3409 | list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], | |
3410 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3411 | list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], | |
3412 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3413 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], | |
3414 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3415 | list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], | |
3416 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3417 | list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], | |
3418 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3419 | list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], | |
3420 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3421 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], | |
3422 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3423 | list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], | |
3424 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3425 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], | |
3426 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3427 | list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], | |
3428 | sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); | |
3429 | ||
3430 | buf_init(); | |
3431 | ||
3432 | arc_thread_exit = 0; | |
3433 | arc_eviction_list = NULL; | |
3434 | mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); | |
3435 | bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); | |
3436 | ||
3437 | arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, | |
3438 | sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); | |
3439 | ||
3440 | if (arc_ksp != NULL) { | |
3441 | arc_ksp->ks_data = &arc_stats; | |
3442 | kstat_install(arc_ksp); | |
3443 | } | |
3444 | ||
3445 | (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, | |
3446 | TS_RUN, minclsyspri); | |
3447 | ||
3448 | arc_dead = FALSE; | |
3449 | arc_warm = B_FALSE;
3450 | |
3451 | if (zfs_write_limit_max == 0) | |
3452 | zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3453 | else |
3454 | zfs_write_limit_shift = 0; | |
3455 | mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3456 | } |
3457 | ||
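/*
 * A worked example of the sizing arithmetic above, compiled standalone
 * for a hypothetical 8GB machine: arc_c starts at 1G, arc_c_min is
 * MAX(1G/4, 64M) = 256M, and arc_c_max is MAX(6G, 8G - 1G) = 7G.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint64_t physbytes = 8ULL << 30;	/* assume 8GB of memory */
	uint64_t c = physbytes / 8;		/* 1/8 of memory = 1G */
	uint64_t c_min = (c / 4 > 64ULL << 20) ? c / 4 : 64ULL << 20;
	uint64_t c_max = (c * 8 >= 1ULL << 30) ?
	    c * 8 - (1ULL << 30) : c_min;

	if (c * 6 > c_max)
		c_max = c * 6;
	printf("arc_c_min=%lluM arc_c_max=%lluM\n",
	    (unsigned long long)(c_min >> 20),
	    (unsigned long long)(c_max >> 20));	/* 256M and 7168M */
	return (0);
}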
3458 | void | |
3459 | arc_fini(void) | |
3460 | { | |
3461 | mutex_enter(&arc_reclaim_thr_lock); | |
3462 | arc_thread_exit = 1; | |
3463 | while (arc_thread_exit != 0) | |
3464 | cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); | |
3465 | mutex_exit(&arc_reclaim_thr_lock); | |
3466 | ||
3467 | arc_flush(NULL); | |
3468 | ||
3469 | arc_dead = TRUE; | |
3470 | ||
3471 | if (arc_ksp != NULL) { | |
3472 | kstat_delete(arc_ksp); | |
3473 | arc_ksp = NULL; | |
3474 | } | |
3475 | ||
3476 | mutex_destroy(&arc_eviction_mtx); | |
3477 | mutex_destroy(&arc_reclaim_thr_lock); | |
3478 | cv_destroy(&arc_reclaim_thr_cv); | |
3479 | ||
3480 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); | |
3481 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3482 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); | |
3483 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); | |
3484 | list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); | |
3485 | list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); | |
3486 | list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); | |
3487 | list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); | |
3488 | ||
3489 | mutex_destroy(&arc_anon->arcs_mtx); | |
3490 | mutex_destroy(&arc_mru->arcs_mtx); | |
3491 | mutex_destroy(&arc_mru_ghost->arcs_mtx); | |
3492 | mutex_destroy(&arc_mfu->arcs_mtx); | |
3493 | mutex_destroy(&arc_mfu_ghost->arcs_mtx); | |
3494 | ||
3495 | mutex_destroy(&zfs_write_limit_lock); |
3496 | ||
3497 | buf_fini(); |
3498 | } | |
3499 | ||
3500 | /* | |
3501 | * Level 2 ARC | |
3502 | * | |
3503 | * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. | |
3504 | * It uses dedicated storage devices to hold cached data, which are populated | |
3505 | * using large infrequent writes. The main role of this cache is to boost | |
3506 | * the performance of random read workloads. The intended L2ARC devices | |
3507 | * include short-stroked disks, solid state disks, and other media with | |
3508 | * substantially faster read latency than disk. | |
3509 | * | |
3510 | * +-----------------------+ | |
3511 | * | ARC | | |
3512 | * +-----------------------+ | |
3513 | * | ^ ^ | |
3514 | * | | | | |
3515 | * l2arc_feed_thread() arc_read() | |
3516 | * | | | | |
3517 | * | l2arc read | | |
3518 | * V | | | |
3519 | * +---------------+ | | |
3520 | * | L2ARC | | | |
3521 | * +---------------+ | | |
3522 | * | ^ | | |
3523 | * l2arc_write() | | | |
3524 | * | | | | |
3525 | * V | | | |
3526 | * +-------+ +-------+ | |
3527 | * | vdev | | vdev | | |
3528 | * | cache | | cache | | |
3529 | * +-------+ +-------+ | |
3530 | * +=========+ .-----. | |
3531 | * : L2ARC : |-_____-| | |
3532 | * : devices : | Disks | | |
3533 | * +=========+ `-_____-' | |
3534 | * | |
3535 | * Read requests are satisfied from the following sources, in order: | |
3536 | * | |
3537 | * 1) ARC | |
3538 | * 2) vdev cache of L2ARC devices | |
3539 | * 3) L2ARC devices | |
3540 | * 4) vdev cache of disks | |
3541 | * 5) disks | |
3542 | * | |
3543 | * Some L2ARC device types exhibit extremely slow write performance. | |
3544 | * To accommodate this, there are some significant differences between
3545 | * the L2ARC and traditional cache design: | |
3546 | * | |
3547 | * 1. There is no eviction path from the ARC to the L2ARC. Evictions from | |
3548 | * the ARC behave as usual, freeing buffers and placing headers on ghost | |
3549 | * lists. The ARC does not send buffers to the L2ARC during eviction as | |
3550 | * this would add inflated write latencies for all ARC memory pressure. | |
3551 | * | |
3552 | * 2. The L2ARC attempts to cache data from the ARC before it is evicted. | |
3553 | * It does this by periodically scanning buffers from the eviction-end of | |
3554 | * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are | |
3555 | * not already there. It scans until a headroom of buffers is satisfied, | |
3556 | * which itself serves as a margin ahead of ARC eviction. The thread that does this is
3557 | * l2arc_feed_thread(), illustrated below; example sizes are included to | |
3558 | * provide a better sense of ratio than this diagram: | |
3559 | * | |
3560 | * head --> tail | |
3561 | * +---------------------+----------+ | |
3562 | * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC | |
3563 | * +---------------------+----------+ | o L2ARC eligible | |
3564 | * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer | |
3565 | * +---------------------+----------+ | | |
3566 | * 15.9 Gbytes ^ 32 Mbytes | | |
3567 | * headroom | | |
3568 | * l2arc_feed_thread() | |
3569 | * | | |
3570 | * l2arc write hand <--[oooo]--' | |
3571 | * | 8 Mbyte | |
3572 | * | write max | |
3573 | * V | |
3574 | * +==============================+ | |
3575 | * L2ARC dev |####|#|###|###| |####| ... | | |
3576 | * +==============================+ | |
3577 | * 32 Gbytes | |
3578 | * | |
3579 | * 3. If an ARC buffer is copied to the L2ARC but then hit instead of | |
3580 | * evicted, then the L2ARC has cached a buffer much sooner than it probably | |
3581 | * needed to, potentially wasting L2ARC device bandwidth and storage. It is | |
3582 | * safe to say that this is an uncommon case, since buffers at the end of | |
3583 | * the ARC lists have moved there due to inactivity. | |
3584 | * | |
3585 | * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, | |
3586 | * then the L2ARC simply misses copying some buffers. This serves as a | |
3587 | * pressure valve to prevent heavy read workloads from both stalling the ARC | |
3588 | * with waits and clogging the L2ARC with writes. This also helps prevent | |
3589 | * the potential for the L2ARC to churn if it attempts to cache content too | |
3590 | * quickly, such as during backups of the entire pool. | |
3591 | * | |
3592 | * 5. After system boot and before the ARC has filled main memory, there are |
3593 | * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru | |
3594 | * lists can remain mostly static. Instead of searching from the tail of these
3595 | * lists as pictured, the l2arc_feed_thread() will search from the list heads | |
3596 | * for eligible buffers, greatly increasing its chance of finding them. | |
3597 | * | |
3598 | * The L2ARC device write speed is also boosted during this time so that | |
3599 | * the L2ARC warms up faster. Since there have been no ARC evictions yet, | |
3600 | * there are no L2ARC reads, and no fear of degrading read performance | |
3601 | * through increased writes. | |
3602 | * | |
3603 | * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that | |
3604 | * the vdev queue can aggregate them into larger and fewer writes. Each |
3605 | * device is written to in a rotor fashion, sweeping writes through | |
3606 | * available space then repeating. | |
3607 | * | |
3608 | * 7. The L2ARC does not store dirty content. It never needs to flush
3609 | * write buffers back to disk based storage. |
3610 | * | |
3611 | * 8. If an ARC buffer is written (and dirtied) which also exists in the
3612 | * L2ARC, the now stale L2ARC buffer is immediately dropped. |
3613 | * | |
3614 | * The performance of the L2ARC can be tweaked by a number of tunables, which | |
3615 | * may be necessary for different workloads: | |
3616 | * | |
3617 | * l2arc_write_max max write bytes per interval | |
3618 | * l2arc_write_boost extra write bytes during device warmup
3619 | * l2arc_noprefetch skip caching prefetched buffers |
3620 | * l2arc_headroom number of max device writes to precache | |
3621 | * l2arc_feed_secs seconds between L2ARC writing | |
3622 | * | |
3623 | * Tunables may be removed or added as future performance improvements are | |
3624 | * integrated, and also may become zpool properties. | |
3625 | */ | |
3626 | ||
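/*
 * The per-interval write sizing implied by points 2 and 5 above, in
 * isolation: each feed interval may write up to l2arc_write_max bytes,
 * plus l2arc_write_boost while the ARC is still warming up. A sketch
 * over plain parameters, not the tunables themselves.
 */
#include <stdint.h>

static uint64_t
feed_interval_bytes(int arc_is_warm, uint64_t write_max,
    uint64_t write_boost)
{
	uint64_t size = write_max;	/* e.g. the 8M from the diagram */

	if (!arc_is_warm)
		size += write_boost;	/* warm the device faster */
	return (size);
}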
3627 | static void | |
3628 | l2arc_hdr_stat_add(void) | |
3629 | { | |
3630 | ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); | |
3631 | ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); | |
3632 | } | |
3633 | ||
3634 | static void | |
3635 | l2arc_hdr_stat_remove(void) | |
3636 | { | |
3637 | ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); | |
3638 | ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); | |
3639 | } | |
3640 | ||
3641 | /* | |
3642 | * Cycle through L2ARC devices. This is how L2ARC load balances. | |
3643 | * If a device is returned, this also returns holding the spa config lock.
3644 | */ |
3645 | static l2arc_dev_t * | |
3646 | l2arc_dev_get_next(void) | |
3647 | { | |
3648 | l2arc_dev_t *first, *next = NULL;
3649 |
3650 | /* |
3651 | * Lock out the removal of spas (spa_namespace_lock), then removal | |
3652 | * of cache devices (l2arc_dev_mtx). Once a device has been selected, | |
3653 | * both locks will be dropped and a spa config lock held instead. | |
3654 | */ | |
3655 | mutex_enter(&spa_namespace_lock); | |
3656 | mutex_enter(&l2arc_dev_mtx); | |
3657 | ||
3658 | /* if there are no vdevs, there is nothing to do */ | |
3659 | if (l2arc_ndev == 0) | |
3660 | goto out; | |
3661 | ||
3662 | first = NULL; | |
3663 | next = l2arc_dev_last; | |
3664 | do { | |
3665 | /* loop around the list looking for a non-faulted vdev */ | |
3666 | if (next == NULL) { | |
3667 | next = list_head(l2arc_dev_list);
3668 | } else { |
3669 | next = list_next(l2arc_dev_list, next); | |
3670 | if (next == NULL) | |
3671 | next = list_head(l2arc_dev_list); | |
3672 | } | |
3673 | ||
3674 | /* if we have come back to the start, bail out */ | |
3675 | if (first == NULL) | |
3676 | first = next; | |
3677 | else if (next == first) | |
3678 | break; | |
3679 | ||
3680 | } while (vdev_is_dead(next->l2ad_vdev)); | |
3681 | ||
3682 | /* if we were unable to find any usable vdevs, return NULL */ | |
3683 | if (vdev_is_dead(next->l2ad_vdev)) | |
3684 | next = NULL; | |
3685 | |
3686 | l2arc_dev_last = next; | |
3687 | ||
3688 | out: |
3689 | mutex_exit(&l2arc_dev_mtx); | |
3690 | ||
3691 | /* | |
3692 | * Grab the config lock to prevent the 'next' device from being | |
3693 | * removed while we are writing to it. | |
3694 | */ | |
3695 | if (next != NULL) | |
3696 | spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); | |
3697 | mutex_exit(&spa_namespace_lock); | |
3698 | ||
3699 | return (next); |
3700 | } | |
3701 | ||
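/*
 * The rotor in l2arc_dev_get_next() reduced to its core, over an array
 * rather than a linked list: resume after the previously used device,
 * wrap at the end, and give up after one full lap if every device is
 * faulted. A sketch only; the real version also juggles three locks.
 */
static int
rotor_next(int ndev, int last, const int *is_dead)
{
	int i, next;

	for (i = 1; i <= ndev; i++) {
		next = (last + i) % ndev;	/* wrap around the list */
		if (!is_dead[next])
			return (next);
	}
	return (-1);	/* one full lap: all devices faulted */
}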
3702 | /* |
3703 | * Free buffers that were tagged for destruction. | |
3704 | */ | |
3705 | static void | |
3706 | l2arc_do_free_on_write(void)
3707 | { | |
3708 | list_t *buflist; | |
3709 | l2arc_data_free_t *df, *df_prev; | |
3710 | ||
3711 | mutex_enter(&l2arc_free_on_write_mtx); | |
3712 | buflist = l2arc_free_on_write; | |
3713 | ||
3714 | for (df = list_tail(buflist); df; df = df_prev) { | |
3715 | df_prev = list_prev(buflist, df); | |
3716 | ASSERT(df->l2df_data != NULL); | |
3717 | ASSERT(df->l2df_func != NULL); | |
3718 | df->l2df_func(df->l2df_data, df->l2df_size); | |
3719 | list_remove(buflist, df); | |
3720 | kmem_free(df, sizeof (l2arc_data_free_t)); | |
3721 | } | |
3722 | ||
3723 | mutex_exit(&l2arc_free_on_write_mtx); | |
3724 | } | |
3725 | ||
3726 | /* |
3727 | * A write to a cache device has completed. Update all headers to allow | |
3728 | * reads from these buffers to begin. | |
3729 | */ | |
3730 | static void | |
3731 | l2arc_write_done(zio_t *zio) | |
3732 | { | |
3733 | l2arc_write_callback_t *cb; | |
3734 | l2arc_dev_t *dev; | |
3735 | list_t *buflist; | |
3736 | arc_buf_hdr_t *head, *ab, *ab_prev;
3737 | l2arc_buf_hdr_t *abl2;
3738 | kmutex_t *hash_lock; |
3739 | ||
3740 | cb = zio->io_private; | |
3741 | ASSERT(cb != NULL); | |
3742 | dev = cb->l2wcb_dev; | |
3743 | ASSERT(dev != NULL); | |
3744 | head = cb->l2wcb_head; | |
3745 | ASSERT(head != NULL); | |
3746 | buflist = dev->l2ad_buflist; | |
3747 | ASSERT(buflist != NULL); | |
3748 | DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, | |
3749 | l2arc_write_callback_t *, cb); | |
3750 | ||
3751 | if (zio->io_error != 0) | |
3752 | ARCSTAT_BUMP(arcstat_l2_writes_error); | |
3753 | ||
3754 | mutex_enter(&l2arc_buflist_mtx); | |
3755 | ||
3756 | /* | |
3757 | * All writes completed, or an error was hit. | |
3758 | */ | |
3759 | for (ab = list_prev(buflist, head); ab; ab = ab_prev) { | |
3760 | ab_prev = list_prev(buflist, ab); | |
3761 | ||
3762 | hash_lock = HDR_LOCK(ab); | |
3763 | if (!mutex_tryenter(hash_lock)) { | |
3764 | /* | |
3765 | * This buffer misses out. It may be in a stage | |
3766 | * of eviction. Its ARC_L2_WRITING flag will be | |
3767 | * left set, denying reads to this buffer. | |
3768 | */ | |
3769 | ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); | |
3770 | continue; | |
3771 | } | |
3772 | ||
3773 | if (zio->io_error != 0) { | |
3774 | /* | |
3775 | * Error - drop L2ARC entry.
3776 | */
3777 | list_remove(buflist, ab);
3778 | abl2 = ab->b_l2hdr;
3779 | ab->b_l2hdr = NULL;
3780 | kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3781 | ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3782 | } |
3783 | ||
3784 | /* | |
3785 | * Allow ARC to begin reads to this L2ARC entry. | |
3786 | */ | |
3787 | ab->b_flags &= ~ARC_L2_WRITING; | |
3788 | ||
3789 | mutex_exit(hash_lock); | |
3790 | } | |
3791 | ||
3792 | atomic_inc_64(&l2arc_writes_done); | |
3793 | list_remove(buflist, head); | |
3794 | kmem_cache_free(hdr_cache, head); | |
3795 | mutex_exit(&l2arc_buflist_mtx); | |
3796 | ||
3797 | l2arc_do_free_on_write();
3798 | |
3799 | kmem_free(cb, sizeof (l2arc_write_callback_t)); | |
3800 | } | |
3801 | ||
3802 | /* | |
3803 | * A read to a cache device completed. Validate buffer contents before | |
3804 | * handing over to the regular ARC routines. | |
3805 | */ | |
3806 | static void | |
3807 | l2arc_read_done(zio_t *zio) | |
3808 | { | |
3809 | l2arc_read_callback_t *cb; | |
3810 | arc_buf_hdr_t *hdr; | |
3811 | arc_buf_t *buf; | |
34dc7c2f | 3812 | kmutex_t *hash_lock; |
3813 | int equal; |
3814 | ||
3815 | ASSERT(zio->io_vd != NULL); | |
3816 | ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); | |
3817 | ||
3818 | spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); | |
3819 | |
3820 | cb = zio->io_private; | |
3821 | ASSERT(cb != NULL); | |
3822 | buf = cb->l2rcb_buf; | |
3823 | ASSERT(buf != NULL); | |
3824 | hdr = buf->b_hdr; | |
3825 | ASSERT(hdr != NULL); | |
3826 | ||
3827 | hash_lock = HDR_LOCK(hdr); | |
3828 | mutex_enter(hash_lock); | |
3829 | ||
3830 | /* | |
3831 | * Check whether this buffer survived the L2ARC journey.
3832 | */ | |
3833 | equal = arc_cksum_equal(buf); | |
3834 | if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { | |
3835 | mutex_exit(hash_lock); | |
3836 | zio->io_private = buf; | |
3837 | zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ |
3838 | zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ | |
3839 | arc_read_done(zio); |
3840 | } else { | |
3841 | mutex_exit(hash_lock); | |
3842 | /* | |
3843 | * Buffer didn't survive caching. Increment stats and | |
3844 | * reissue to the original storage device. | |
3845 | */ | |
3846 | if (zio->io_error != 0) {
3847 | ARCSTAT_BUMP(arcstat_l2_io_error);
3848 | } else { |
3849 | zio->io_error = EIO; | |
3850 | } | |
3851 | if (!equal) |
3852 | ARCSTAT_BUMP(arcstat_l2_cksum_bad); | |
3853 | ||
3854 | /*
3855 | * If there's no waiter, issue an async i/o to the primary
3856 | * storage now. If there *is* a waiter, the caller must
3857 | * issue the i/o in a context where it's OK to block.
3858 | */
3859 | if (zio->io_waiter == NULL)
3860 | zio_nowait(zio_read(zio->io_parent,
3861 | cb->l2rcb_spa, &cb->l2rcb_bp,
3862 | buf->b_data, zio->io_size, arc_read_done, buf,
3863 | zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
3864 | } |
3865 | ||
3866 | kmem_free(cb, sizeof (l2arc_read_callback_t)); | |
3867 | } | |
3868 | ||
3869 | /* | |
3870 | * This is the list priority from which the L2ARC will search for pages to | |
3871 | * cache. This is used within loops (0..3) to cycle through lists in the | |
3872 | * desired order. This order can have a significant effect on cache | |
3873 | * performance. | |
3874 | * | |
3875 | * Currently the metadata lists are hit first, MFU then MRU, followed by | |
3876 | * the data lists. This function returns a locked list, and also returns | |
3877 | * the lock pointer. | |
3878 | */ | |
3879 | static list_t * | |
3880 | l2arc_list_locked(int list_num, kmutex_t **lock) | |
3881 | { | |
3882 | list_t *list; | |
3883 | ||
3884 | ASSERT(list_num >= 0 && list_num <= 3); | |
3885 | ||
3886 | switch (list_num) { | |
3887 | case 0: | |
3888 | list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; | |
3889 | *lock = &arc_mfu->arcs_mtx; | |
3890 | break; | |
3891 | case 1: | |
3892 | list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; | |
3893 | *lock = &arc_mru->arcs_mtx; | |
3894 | break; | |
3895 | case 2: | |
3896 | list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; | |
3897 | *lock = &arc_mfu->arcs_mtx; | |
3898 | break; | |
3899 | case 3: | |
3900 | list = &arc_mru->arcs_list[ARC_BUFC_DATA]; | |
3901 | *lock = &arc_mru->arcs_mtx; | |
3902 | break; | |
3903 | } | |
3904 | ||
3905 | ASSERT(!(MUTEX_HELD(*lock))); | |
3906 | mutex_enter(*lock); | |
3907 | return (list); | |
3908 | } | |
3909 | ||
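/*
 * How l2arc_write_buffers() drives the priority order above (a sketch
 * of the caller pattern; list and list_lock are the caller's locals).
 * The list comes back locked, and the caller drops the lock when its
 * scan of that list is finished.
 */
	for (int try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		/* ... scan this list for L2ARC-eligible buffers ... */
		mutex_exit(list_lock);
	}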
3910 | /* | |
3911 | * Evict buffers from the device write hand to the distance specified in | |
3912 | * bytes. This distance may span populated buffers, it may span nothing. | |
3913 | * This is clearing a region on the L2ARC device ready for writing. | |
3914 | * If the 'all' boolean is set, every buffer is evicted. | |
3915 | */ | |
3916 | static void | |
3917 | l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) | |
3918 | { | |
3919 | list_t *buflist; | |
3920 | l2arc_buf_hdr_t *abl2; | |
3921 | arc_buf_hdr_t *ab, *ab_prev; | |
3922 | kmutex_t *hash_lock; | |
3923 | uint64_t taddr; | |
3924 | ||
3925 | buflist = dev->l2ad_buflist; |
3926 | ||
3927 | if (buflist == NULL) | |
3928 | return; | |
3929 | ||
3930 | if (!all && dev->l2ad_first) { | |
3931 | /* | |
3932 | * This is the first sweep through the device. There is | |
3933 | * nothing to evict. | |
3934 | */ | |
3935 | return; | |
3936 | } | |
3937 | ||
3938 | if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
3939 | /* |
3940 | * When nearing the end of the device, evict to the end | |
3941 | * before the device write hand jumps to the start. | |
3942 | */ | |
3943 | taddr = dev->l2ad_end; | |
3944 | } else { | |
3945 | taddr = dev->l2ad_hand + distance; | |
3946 | } | |
3947 | DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, | |
3948 | uint64_t, taddr, boolean_t, all); | |
3949 | ||
3950 | top: | |
3951 | mutex_enter(&l2arc_buflist_mtx); | |
3952 | for (ab = list_tail(buflist); ab; ab = ab_prev) { | |
3953 | ab_prev = list_prev(buflist, ab); | |
3954 | ||
3955 | hash_lock = HDR_LOCK(ab); | |
3956 | if (!mutex_tryenter(hash_lock)) { | |
3957 | /* | |
3958 | * Missed the hash lock. Retry. | |
3959 | */ | |
3960 | ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); | |
3961 | mutex_exit(&l2arc_buflist_mtx); | |
3962 | mutex_enter(hash_lock); | |
3963 | mutex_exit(hash_lock); | |
3964 | goto top; | |
3965 | } | |
3966 | ||
3967 | if (HDR_L2_WRITE_HEAD(ab)) { | |
3968 | /* | |
3969 | * We hit a write head node. Leave it for | |
3970 | * l2arc_write_done(). | |
3971 | */ | |
3972 | list_remove(buflist, ab); | |
3973 | mutex_exit(hash_lock); | |
3974 | continue; | |
3975 | } | |
3976 | ||
3977 | if (!all && ab->b_l2hdr != NULL && | |
3978 | (ab->b_l2hdr->b_daddr > taddr || | |
3979 | ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { | |
3980 | /* | |
3981 | * We've evicted to the target address, | |
3982 | * or the end of the device. | |
3983 | */ | |
3984 | mutex_exit(hash_lock); | |
3985 | break; | |
3986 | } | |
3987 | ||
3988 | if (HDR_FREE_IN_PROGRESS(ab)) { | |
3989 | /* | |
3990 | * Already on the path to destruction. | |
3991 | */ | |
3992 | mutex_exit(hash_lock); | |
3993 | continue; | |
3994 | } | |
3995 | ||
3996 | if (ab->b_state == arc_l2c_only) { | |
3997 | ASSERT(!HDR_L2_READING(ab)); | |
3998 | /* | |
3999 | * This doesn't exist in the ARC. Destroy. | |
4000 | * arc_hdr_destroy() will call list_remove() | |
4001 | * and decrement arcstat_l2_size. | |
4002 | */ | |
4003 | arc_change_state(arc_anon, ab, hash_lock); | |
4004 | arc_hdr_destroy(ab); | |
4005 | } else { | |
4006 | /* |
4007 | * Invalidate issued or about to be issued | |
4008 | * reads, since we may be about to write | |
4009 | * over this location. | |
4010 | */ | |
4011 | if (HDR_L2_READING(ab)) { | |
4012 | ARCSTAT_BUMP(arcstat_l2_evict_reading); | |
4013 | ab->b_flags |= ARC_L2_EVICTED; | |
4014 | } | |
4015 | ||
4016 | /* |
4017 | * Tell ARC this no longer exists in L2ARC. | |
4018 | */ | |
4019 | if (ab->b_l2hdr != NULL) { | |
4020 | abl2 = ab->b_l2hdr; | |
4021 | ab->b_l2hdr = NULL; | |
4022 | kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); | |
4023 | ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); | |
4024 | } | |
4025 | list_remove(buflist, ab); | |
4026 | ||
4027 | /* | |
4028 | * This may have been leftover after a | |
4029 | * failed write. | |
4030 | */ | |
4031 | ab->b_flags &= ~ARC_L2_WRITING; | |
4032 | } |
4033 | mutex_exit(hash_lock); | |
4034 | } | |
4035 | mutex_exit(&l2arc_buflist_mtx); | |
4036 | ||
4037 | spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); | |
4038 | dev->l2ad_evict = taddr; | |
4039 | } | |
4040 | ||
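/*
 * The target-address selection above as a standalone helper: when the
 * write hand is within two write-sizes of the device end, evict through
 * to the end (the hand is about to wrap); otherwise clear just ahead of
 * it. Field meanings mirror l2arc_dev_t; a sketch, not the kernel code.
 */
#include <stdint.h>

static uint64_t
evict_target_addr(uint64_t hand, uint64_t end, uint64_t distance)
{
	if (hand >= end - (2 * distance))
		return (end);		/* evict to the end, then wrap */
	return (hand + distance);	/* evict just ahead of the hand */
}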
4041 | /* | |
4042 | * Find and write ARC buffers to the L2ARC device. | |
4043 | * | |
4044 | * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid | |
4045 | * for reading until they have completed writing. | |
4046 | */ | |
4047 | static void | |
4048 | l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4049 | { |
4050 | arc_buf_hdr_t *ab, *ab_prev, *head; | |
4051 | l2arc_buf_hdr_t *hdrl2; | |
4052 | list_t *list; | |
4053 | uint64_t passed_sz, write_sz, buf_sz, headroom;
4054 | void *buf_data; |
4055 | kmutex_t *hash_lock, *list_lock; | |
4056 | boolean_t have_lock, full; | |
4057 | l2arc_write_callback_t *cb; | |
4058 | zio_t *pio, *wzio; | |
4059 | ||
4060 | ASSERT(dev->l2ad_vdev != NULL); |
4061 | ||
4062 | pio = NULL; | |
4063 | write_sz = 0; | |
4064 | full = B_FALSE; | |
4065 | head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); | |
4066 | head->b_flags |= ARC_L2_WRITE_HEAD; | |
4067 | ||
4068 | /* | |
4069 | * Copy buffers for L2ARC writing. | |
4070 | */ | |
4071 | mutex_enter(&l2arc_buflist_mtx); | |
4072 | for (int try = 0; try <= 3; try++) { | |
4073 | list = l2arc_list_locked(try, &list_lock); | |
4074 | passed_sz = 0; | |
4075 | ||
4076 | /* |
4077 | * L2ARC fast warmup. | |
4078 | * | |
4079 | * Until the ARC is warm and starts to evict, read from the | |
4080 | * head of the ARC lists rather than the tail. | |
4081 | */ | |
4082 | headroom = target_sz * l2arc_headroom; | |
4083 | if (arc_warm == B_FALSE) | |
4084 | ab = list_head(list); | |
4085 | else | |
4086 | ab = list_tail(list); | |
4087 | ||
4088 | for (; ab; ab = ab_prev) { | |
4089 | if (arc_warm == B_FALSE) | |
4090 | ab_prev = list_next(list, ab); | |
4091 | else | |
4092 | ab_prev = list_prev(list, ab); | |
4093 | |
4094 | hash_lock = HDR_LOCK(ab); | |
4095 | have_lock = MUTEX_HELD(hash_lock); | |
4096 | if (!have_lock && !mutex_tryenter(hash_lock)) { | |
4097 | /* | |
4098 | * Skip this buffer rather than waiting. | |
4099 | */ | |
4100 | continue; | |
4101 | } | |
4102 | ||
4103 | passed_sz += ab->b_size; | |
4104 | if (passed_sz > headroom) { | |
4105 | /* | |
4106 | * Searched too far. | |
4107 | */ | |
4108 | mutex_exit(hash_lock); | |
4109 | break; | |
4110 | } | |
4111 | ||
4112 | if (ab->b_spa != spa) { | |
4113 | mutex_exit(hash_lock); | |
4114 | continue; | |
4115 | } | |
4116 | ||
4117 | if (ab->b_l2hdr != NULL) { | |
4118 | /* | |
4119 | * Already in L2ARC. | |
4120 | */ | |
4121 | mutex_exit(hash_lock); | |
4122 | continue; | |
4123 | } | |
4124 | ||
4125 | if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) {
4126 | mutex_exit(hash_lock); |
4127 | continue; | |
4128 | } | |
4129 | ||
4130 | if ((write_sz + ab->b_size) > target_sz) { | |
4131 | full = B_TRUE; | |
4132 | mutex_exit(hash_lock); | |
4133 | break; | |
4134 | } | |
4135 | ||
4136 | if (ab->b_buf == NULL) { | |
4137 | DTRACE_PROBE1(l2arc__buf__null, void *, ab); | |
4138 | mutex_exit(hash_lock); | |
4139 | continue; | |
4140 | } | |
4141 | ||
4142 | if (pio == NULL) { | |
4143 | /* | |
4144 | * Insert a dummy header on the buflist so | |
4145 | * l2arc_write_done() can find where the | |
4146 | * write buffers begin without searching. | |
4147 | */ | |
4148 | list_insert_head(dev->l2ad_buflist, head); | |
4149 | ||
4150 | cb = kmem_alloc( | |
4151 | sizeof (l2arc_write_callback_t), KM_SLEEP); | |
4152 | cb->l2wcb_dev = dev; | |
4153 | cb->l2wcb_head = head; | |
4154 | pio = zio_root(spa, l2arc_write_done, cb, | |
4155 | ZIO_FLAG_CANFAIL); | |
4156 | } | |
4157 | ||
4158 | /* | |
4159 | * Create and add a new L2ARC header. | |
4160 | */ | |
4161 | hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); | |
4162 | hdrl2->b_dev = dev; | |
4163 | hdrl2->b_daddr = dev->l2ad_hand; | |
4164 | ||
4165 | ab->b_flags |= ARC_L2_WRITING; | |
4166 | ab->b_l2hdr = hdrl2; | |
4167 | list_insert_head(dev->l2ad_buflist, ab); | |
4168 | buf_data = ab->b_buf->b_data; | |
4169 | buf_sz = ab->b_size; | |
4170 | ||
4171 | /* | |
4172 | * Compute and store the buffer cksum before | |
4173 | * writing. On debug the cksum is verified first. | |
4174 | */ | |
4175 | arc_cksum_verify(ab->b_buf); | |
4176 | arc_cksum_compute(ab->b_buf, B_TRUE); | |
4177 | ||
4178 | mutex_exit(hash_lock); | |
4179 | ||
4180 | wzio = zio_write_phys(pio, dev->l2ad_vdev, | |
4181 | dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, | |
4182 | NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, | |
4183 | ZIO_FLAG_CANFAIL, B_FALSE); | |
4184 | ||
4185 | DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, | |
4186 | zio_t *, wzio); | |
4187 | (void) zio_nowait(wzio); | |
4188 | ||
4189 | /* |
4190 | * Keep the clock hand suitably device-aligned. | |
4191 | */ | |
4192 | buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); | |
4193 | ||
4194 | write_sz += buf_sz; |
4195 | dev->l2ad_hand += buf_sz; | |
4196 | } | |
4197 | ||
4198 | mutex_exit(list_lock); | |
4199 | ||
4200 | if (full == B_TRUE) | |
4201 | break; | |
4202 | } | |
4203 | mutex_exit(&l2arc_buflist_mtx); | |
4204 | ||
4205 | if (pio == NULL) { | |
4206 | ASSERT3U(write_sz, ==, 0); | |
4207 | kmem_cache_free(hdr_cache, head); | |
4208 | return; | |
4209 | } | |
4210 | ||
4211 | ASSERT3U(write_sz, <=, target_sz); | |
4212 | ARCSTAT_BUMP(arcstat_l2_writes_sent); | |
4213 | ARCSTAT_INCR(arcstat_l2_size, write_sz); | |
4214 | spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); | |
4215 | ||
4216 | /* | |
4217 | * Bump device hand to the device start if it is approaching the end. | |
4218 | * l2arc_evict() will already have evicted ahead for this case. | |
4219 | */ | |
4220 | if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4221 | spa_l2cache_space_update(dev->l2ad_vdev, 0, |
4222 | dev->l2ad_end - dev->l2ad_hand); | |
4223 | dev->l2ad_hand = dev->l2ad_start; | |
4224 | dev->l2ad_evict = dev->l2ad_start; | |
4225 | dev->l2ad_first = B_FALSE; | |
4226 | } | |
4227 | ||
4228 | (void) zio_wait(pio); | |
4229 | } | |
4230 | ||
4231 | /* | |
4232 | * This thread feeds the L2ARC at regular intervals. This is the beating | |
4233 | * heart of the L2ARC. | |
4234 | */ | |
4235 | static void | |
4236 | l2arc_feed_thread(void) | |
4237 | { | |
4238 | callb_cpr_t cpr; | |
4239 | l2arc_dev_t *dev; | |
4240 | spa_t *spa; | |
4241 | uint64_t size;
4242 | |
4243 | CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); | |
4244 | ||
4245 | mutex_enter(&l2arc_feed_thr_lock); | |
4246 | ||
4247 | while (l2arc_thread_exit == 0) { | |
4248 | /* | |
4249 | * Pause for l2arc_feed_secs seconds between writes.
4250 | */
4251 | CALLB_CPR_SAFE_BEGIN(&cpr);
4252 | (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4253 | lbolt + (hz * l2arc_feed_secs));
4254 | CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); |
4255 | ||
4256 | /* | |
4257 | * Quick check for L2ARC devices.
4258 | */ |
4259 | mutex_enter(&l2arc_dev_mtx); | |
4260 | if (l2arc_ndev == 0) { | |
4261 | mutex_exit(&l2arc_dev_mtx); | |
4262 | continue; | |
4263 | } | |
4264 | mutex_exit(&l2arc_dev_mtx);
4265 | |
4266 | /* | |
4267 | * This selects the next l2arc device to write to, and in |
4268 | * doing so the next spa to feed from: dev->l2ad_spa. This | |
4269 | * will return NULL if there are now no l2arc devices or if | |
4270 | * they are all faulted. | |
4271 | * | |
4272 | * If a device is returned, its spa's config lock is also | |
4273 | * held to prevent device removal. l2arc_dev_get_next() | |
4274 | * will grab and release l2arc_dev_mtx. | |
4275 | */
4276 | if ((dev = l2arc_dev_get_next()) == NULL)
4277 | continue;
4278 | |
4279 | spa = dev->l2ad_spa; | |
4280 | ASSERT(spa != NULL); | |
4281 | |
4282 | /* | |
4283 | * Avoid contributing to memory pressure.
4284 | */
4285 | if (arc_reclaim_needed()) { |
4286 | ARCSTAT_BUMP(arcstat_l2_abort_lowmem); | |
4287 | spa_config_exit(spa, SCL_L2ARC, dev); | |
4288 | continue; |
4289 | } | |
4290 |
4291 | ARCSTAT_BUMP(arcstat_l2_feeds); |
4292 | ||
4293 | size = dev->l2ad_write; |
4294 | if (arc_warm == B_FALSE) | |
4295 | size += dev->l2ad_boost; | |
4296 | ||
4297 | /* |
4298 | * Evict L2ARC buffers that will be overwritten. | |
4299 | */ | |
4300 | l2arc_evict(dev, size, B_FALSE);
4301 | |
4302 | /* | |
4303 | * Write ARC buffers. | |
4304 | */ | |
4305 | l2arc_write_buffers(spa, dev, size); |
4306 | spa_config_exit(spa, SCL_L2ARC, dev); | |
4307 | } |
4308 | ||
4309 | l2arc_thread_exit = 0; | |
4310 | cv_broadcast(&l2arc_feed_thr_cv); | |
4311 | CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ | |
4312 | thread_exit(); | |
4313 | } | |
4314 | ||
4315 | boolean_t |
4316 | l2arc_vdev_present(vdev_t *vd) | |
4317 | { | |
4318 | l2arc_dev_t *dev; | |
4319 | ||
4320 | mutex_enter(&l2arc_dev_mtx); | |
4321 | for (dev = list_head(l2arc_dev_list); dev != NULL; | |
4322 | dev = list_next(l2arc_dev_list, dev)) { | |
4323 | if (dev->l2ad_vdev == vd) | |
4324 | break; | |
4325 | } | |
4326 | mutex_exit(&l2arc_dev_mtx); | |
4327 | ||
4328 | return (dev != NULL); | |
4329 | } | |
4330 | ||
4331 | /* |
4332 | * Add a vdev for use by the L2ARC. By this point the spa has already | |
4333 | * validated the vdev and opened it. | |
4334 | */ | |
4335 | void | |
4336 | l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) | |
4337 | { | |
4338 | l2arc_dev_t *adddev; | |
4339 | ||
4340 | ASSERT(!l2arc_vdev_present(vd)); |
4341 | ||
4342 | /* |
4343 | * Create a new l2arc device entry. | |
4344 | */ | |
4345 | adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); | |
4346 | adddev->l2ad_spa = spa; | |
4347 | adddev->l2ad_vdev = vd; | |
4348 | adddev->l2ad_write = l2arc_write_max; | |
4349 | adddev->l2ad_boost = l2arc_write_boost;
4350 | adddev->l2ad_start = start; |
4351 | adddev->l2ad_end = end; | |
4352 | adddev->l2ad_hand = adddev->l2ad_start; | |
4353 | adddev->l2ad_evict = adddev->l2ad_start; | |
4354 | adddev->l2ad_first = B_TRUE; | |
4355 | ASSERT3U(adddev->l2ad_write, >, 0); | |
4356 | ||
4357 | /* | |
4358 | * This is a list of all ARC buffers that are still valid on the | |
4359 | * device. | |
4360 | */ | |
4361 | adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); | |
4362 | list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), | |
4363 | offsetof(arc_buf_hdr_t, b_l2node)); | |
4364 | ||
4365 | spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); | |
4366 | ||
4367 | /* | |
4368 | * Add device to global list | |
4369 | */ | |
4370 | mutex_enter(&l2arc_dev_mtx); | |
4371 | list_insert_head(l2arc_dev_list, adddev); | |
4372 | atomic_inc_64(&l2arc_ndev); | |
4373 | mutex_exit(&l2arc_dev_mtx); | |
4374 | } | |
4375 | ||
4376 | /* | |
4377 | * Remove a vdev from the L2ARC. | |
4378 | */ | |
4379 | void | |
4380 | l2arc_remove_vdev(vdev_t *vd) | |
4381 | { | |
4382 | l2arc_dev_t *dev, *nextdev, *remdev = NULL; | |
4383 | ||
4384 | /* |
4385 | * Find the device by vdev | |
4386 | */ | |
4387 | mutex_enter(&l2arc_dev_mtx); | |
4388 | for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { | |
4389 | nextdev = list_next(l2arc_dev_list, dev); | |
4390 | if (vd == dev->l2ad_vdev) { | |
4391 | remdev = dev; | |
4392 | break; | |
4393 | } | |
4394 | } | |
4395 | ASSERT(remdev != NULL); | |
4396 | ||
4397 | /* | |
4398 | * Remove device from global list | |
4399 | */ | |
4400 | list_remove(l2arc_dev_list, remdev); | |
4401 | l2arc_dev_last = NULL; /* may have been invalidated */ | |
4402 | atomic_dec_64(&l2arc_ndev); |
4403 | mutex_exit(&l2arc_dev_mtx); | |
4404 | |
4405 | /* | |
4406 | * Clear all buflists and ARC references. L2ARC device flush. | |
4407 | */ | |
4408 | l2arc_evict(remdev, 0, B_TRUE); | |
4409 | list_destroy(remdev->l2ad_buflist); | |
4410 | kmem_free(remdev->l2ad_buflist, sizeof (list_t)); | |
4411 | kmem_free(remdev, sizeof (l2arc_dev_t)); | |
4412 | } |
4413 | ||
4414 | void | |
4415 | l2arc_init(void)
4416 | { |
4417 | l2arc_thread_exit = 0; | |
4418 | l2arc_ndev = 0; | |
4419 | l2arc_writes_sent = 0; | |
4420 | l2arc_writes_done = 0; | |
4421 | ||
4422 | mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); | |
4423 | cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); | |
4424 | mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4425 | mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4426 | mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); | |
4427 | ||
4428 | l2arc_dev_list = &L2ARC_dev_list; | |
4429 | l2arc_free_on_write = &L2ARC_free_on_write; | |
4430 | list_create(l2arc_dev_list, sizeof (l2arc_dev_t), | |
4431 | offsetof(l2arc_dev_t, l2ad_node)); | |
4432 | list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), | |
4433 | offsetof(l2arc_data_free_t, l2df_list_node)); | |
4434 | } |
4435 | ||
4436 | void | |
4437 | l2arc_fini(void)
4438 | {
4439 | /* |
4440 | * This is called from dmu_fini(), which is called from spa_fini(); | |
4441 | * Because of this, we can assume that all l2arc devices have | |
4442 | * already been removed when the pools themselves were removed. | |
4443 | */ | |
4444 | ||
4445 | l2arc_do_free_on_write(); | |
4446 | |
4447 | mutex_destroy(&l2arc_feed_thr_lock); | |
4448 | cv_destroy(&l2arc_feed_thr_cv); | |
4449 | mutex_destroy(&l2arc_dev_mtx); | |
4450 | mutex_destroy(&l2arc_buflist_mtx); | |
4451 | mutex_destroy(&l2arc_free_on_write_mtx); | |
4452 | ||
4453 | list_destroy(l2arc_dev_list); | |
4454 | list_destroy(l2arc_free_on_write); | |
4455 | } | |
4456 | |
4457 | void | |
4458 | l2arc_start(void) | |
4459 | { | |
4460 | if (!(spa_mode & FWRITE)) | |
4461 | return; | |
4462 | ||
4463 | (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, | |
4464 | TS_RUN, minclsyspri); | |
4465 | } | |
4466 | ||
4467 | void | |
4468 | l2arc_stop(void) | |
4469 | { | |
4470 | if (!(spa_mode & FWRITE)) | |
4471 | return; | |
4472 | ||
4473 | mutex_enter(&l2arc_feed_thr_lock); | |
4474 | cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ | |
4475 | l2arc_thread_exit = 1; | |
4476 | while (l2arc_thread_exit != 0) | |
4477 | cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); | |
4478 | mutex_exit(&l2arc_feed_thr_lock); | |
4479 | } |