1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24
25 /*
26 * DVA-based Adjustable Replacement Cache
27 *
28 * While much of the theory of operation used here is
29 * based on the self-tuning, low overhead replacement cache
30 * presented by Megiddo and Modha at FAST 2003, there are some
31 * significant differences:
32 *
33 * 1. The Megiddo and Modha model assumes any page is evictable.
34 * Pages in its cache cannot be "locked" into memory. This makes
35 * the eviction algorithm simple: evict the last page in the list.
36 * This also makes the performance characteristics easy to reason
37 * about. Our cache is not so simple. At any given moment, some
38 * subset of the blocks in the cache are un-evictable because we
39 * have handed out a reference to them. Blocks are only evictable
40 * when there are no external references active. This makes
41 * eviction far more problematic: we choose to evict the evictable
42 * blocks that are the "lowest" in the list.
43 *
44 * There are times when it is not possible to evict the requested
45 * space. In these circumstances we are unable to adjust the cache
46 * size. To prevent the cache from growing unbounded at these times we
47 * implement a "cache throttle" that slows the flow of new data
48 * into the cache until we can make space available.
49 *
50 * 2. The Megiddo and Modha model assumes a fixed cache size.
51 * Pages are evicted when the cache is full and there is a cache
52 * miss. Our model has a variable sized cache. It grows with
53 * high use, but also tries to react to memory pressure from the
54 * operating system: decreasing its size when system memory is
55 * tight.
56 *
57 * 3. The Megiddo and Modha model assumes a fixed page size. All
58 * elements of the cache are therefore exactly the same size. So
59 * when adjusting the cache size following a cache miss, it's simply
60 * a matter of choosing a single page to evict. In our model, we
61 * have variable sized cache blocks (ranging from 512 bytes to
62 * 128K bytes). We therefore choose a set of blocks to evict to make
63 * space for a cache miss that approximates as closely as possible
64 * the space used by the new block.
65 *
66 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
67 * by N. Megiddo & D. Modha, FAST 2003
68 */
69
70 /*
71 * The locking model:
72 *
73 * A new reference to a cache buffer can be obtained in two
74 * ways: 1) via a hash table lookup using the DVA as a key,
75 * or 2) via one of the ARC lists. The arc_read() interface
76 * uses method 1, while the internal arc algorithms for
77 * adjusting the cache use method 2. We therefore provide two
78 * types of locks: 1) the hash table lock array, and 2) the
79 * arc list locks.
80 *
81 * Buffers do not have their own mutexes; rather, they rely on the
82 * hash table mutexes for the bulk of their protection (i.e. most
83 * fields in the arc_buf_hdr_t are protected by these mutexes).
84 *
85 * buf_hash_find() returns the appropriate mutex (held) when it
86 * locates the requested buffer in the hash table. It returns
87 * NULL for the mutex if the buffer was not in the table.
88 *
89 * buf_hash_remove() expects the appropriate hash mutex to be
90 * already held before it is invoked.
91 *
92 * Each arc state also has a mutex which is used to protect the
93 * buffer list associated with the state. When attempting to
94 * obtain a hash table lock while holding an arc list lock you
95 * must use mutex_tryenter() to avoid deadlock. Also note that
96 * the active state mutex must be held before the ghost state mutex.
97 *
98 * Arc buffers may have an associated eviction callback function.
99 * This function will be invoked prior to removing the buffer (e.g.
100 * in arc_do_user_evicts()). Note however that the data associated
101 * with the buffer may be evicted prior to the callback. The callback
102 * must be made with *no locks held* (to prevent deadlock). Additionally,
103 * the users of callbacks must ensure that their private data is
104 * protected from simultaneous callbacks from arc_buf_evict()
105 * and arc_do_user_evicts().
106 *
107 * Note that the majority of the performance stats are manipulated
108 * with atomic operations.
109 *
110 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
111 *
112 * - L2ARC buflist creation
113 * - L2ARC buflist eviction
114 * - L2ARC write completion, which walks L2ARC buflists
115 * - ARC header destruction, as it removes from L2ARC buflists
116 * - ARC header release, as it removes from L2ARC buflists
117 */
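
/*
 * Illustrative sketch (not part of the original source): the ordering
 * rule above means a thread that already holds an arc list lock may
 * only *try* to take a hash table lock, never block on it.  The
 * eviction path below (arc_evict()) follows roughly this pattern:
 *
 *	hash_lock = HDR_LOCK(ab);
 *	have_lock = MUTEX_HELD(hash_lock);
 *	if (have_lock || mutex_tryenter(hash_lock)) {
 *		... evict or move the buffer ...
 *		if (!have_lock)
 *			mutex_exit(hash_lock);
 *	} else {
 *		... skip this buffer and record a mutex miss ...
 *	}
 */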
118
119 #include <sys/spa.h>
120 #include <sys/zio.h>
121 #include <sys/zfs_context.h>
122 #include <sys/arc.h>
123 #include <sys/refcount.h>
124 #include <sys/vdev.h>
125 #include <sys/vdev_impl.h>
126 #ifdef _KERNEL
127 #include <sys/vmsystm.h>
128 #include <vm/anon.h>
129 #include <sys/fs/swapnode.h>
130 #include <sys/dnlc.h>
131 #endif
132 #include <sys/callb.h>
133 #include <sys/kstat.h>
134 #include <zfs_fletcher.h>
135
136 static kmutex_t arc_reclaim_thr_lock;
137 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
138 static uint8_t arc_thread_exit;
139
140 extern int zfs_write_limit_shift;
141 extern uint64_t zfs_write_limit_max;
142 extern kmutex_t zfs_write_limit_lock;
143
144 #define ARC_REDUCE_DNLC_PERCENT 3
145 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
146
147 typedef enum arc_reclaim_strategy {
148 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
149 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
150 } arc_reclaim_strategy_t;
151
152 /* number of seconds before growing cache again */
153 static int arc_grow_retry = 60;
154
155 /* shift of arc_c for calculating both min and max arc_p */
156 static int arc_p_min_shift = 4;
157
158 /* log2(fraction of arc to reclaim) */
159 static int arc_shrink_shift = 5;
160
161 /*
162 * minimum lifespan of a prefetch block in clock ticks
163 * (initialized in arc_init())
164 */
165 static int arc_min_prefetch_lifespan;
166
167 static int arc_dead;
168
169 /*
170 * The arc has filled available memory and has now warmed up.
171 */
172 static boolean_t arc_warm;
173
174 /*
175 * These tunables are for performance analysis.
176 */
177 uint64_t zfs_arc_max;
178 uint64_t zfs_arc_min;
179 uint64_t zfs_arc_meta_limit = 0;
180 int zfs_arc_grow_retry = 0;
181 int zfs_arc_shrink_shift = 0;
182 int zfs_arc_p_min_shift = 0;
183
184 /*
185 * Note that buffers can be in one of 6 states:
186 * ARC_anon - anonymous (discussed below)
187 * ARC_mru - recently used, currently cached
188 * ARC_mru_ghost - recently used, no longer in cache
189 * ARC_mfu - frequently used, currently cached
190 * ARC_mfu_ghost - frequently used, no longer in cache
191 * ARC_l2c_only - exists in L2ARC but not other states
192 * When there are no active references to the buffer, they are
193 * linked onto a list in one of these arc states. These are
194 * the only buffers that can be evicted or deleted. Within each
195 * state there are multiple lists, one for meta-data and one for
196 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
197 * etc.) is tracked separately so that it can be managed more
198 * explicitly: favored over data, limited explicitly.
199 *
200 * Anonymous buffers are buffers that are not associated with
201 * a DVA. These are buffers that hold dirty block copies
202 * before they are written to stable storage. By definition,
203 * they are "ref'd" and are considered part of arc_mru
204 * that cannot be freed. Generally, they will acquire a DVA
205 * as they are written and migrate onto the arc_mru list.
206 *
207 * The ARC_l2c_only state is for buffers that are in the second
208 * level ARC but no longer in any of the ARC_m* lists. The second
209 * level ARC itself may also contain buffers that are in any of
210 * the ARC_m* states - meaning that a buffer can exist in two
211 * places. The reason for the ARC_l2c_only state is to keep the
212 * buffer header in the hash table, so that reads that hit the
213 * second level ARC benefit from these fast lookups.
214 */
215
216 typedef struct arc_state {
217 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
218 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
219 uint64_t arcs_size; /* total amount of data in this state */
220 kmutex_t arcs_mtx;
221 } arc_state_t;
222
223 /* The 6 states: */
224 static arc_state_t ARC_anon;
225 static arc_state_t ARC_mru;
226 static arc_state_t ARC_mru_ghost;
227 static arc_state_t ARC_mfu;
228 static arc_state_t ARC_mfu_ghost;
229 static arc_state_t ARC_l2c_only;
230
231 typedef struct arc_stats {
232 kstat_named_t arcstat_hits;
233 kstat_named_t arcstat_misses;
234 kstat_named_t arcstat_demand_data_hits;
235 kstat_named_t arcstat_demand_data_misses;
236 kstat_named_t arcstat_demand_metadata_hits;
237 kstat_named_t arcstat_demand_metadata_misses;
238 kstat_named_t arcstat_prefetch_data_hits;
239 kstat_named_t arcstat_prefetch_data_misses;
240 kstat_named_t arcstat_prefetch_metadata_hits;
241 kstat_named_t arcstat_prefetch_metadata_misses;
242 kstat_named_t arcstat_mru_hits;
243 kstat_named_t arcstat_mru_ghost_hits;
244 kstat_named_t arcstat_mfu_hits;
245 kstat_named_t arcstat_mfu_ghost_hits;
246 kstat_named_t arcstat_deleted;
247 kstat_named_t arcstat_recycle_miss;
248 kstat_named_t arcstat_mutex_miss;
249 kstat_named_t arcstat_evict_skip;
250 kstat_named_t arcstat_evict_l2_cached;
251 kstat_named_t arcstat_evict_l2_eligible;
252 kstat_named_t arcstat_evict_l2_ineligible;
253 kstat_named_t arcstat_hash_elements;
254 kstat_named_t arcstat_hash_elements_max;
255 kstat_named_t arcstat_hash_collisions;
256 kstat_named_t arcstat_hash_chains;
257 kstat_named_t arcstat_hash_chain_max;
258 kstat_named_t arcstat_p;
259 kstat_named_t arcstat_c;
260 kstat_named_t arcstat_c_min;
261 kstat_named_t arcstat_c_max;
262 kstat_named_t arcstat_size;
263 kstat_named_t arcstat_hdr_size;
264 kstat_named_t arcstat_data_size;
265 kstat_named_t arcstat_other_size;
266 kstat_named_t arcstat_l2_hits;
267 kstat_named_t arcstat_l2_misses;
268 kstat_named_t arcstat_l2_feeds;
269 kstat_named_t arcstat_l2_rw_clash;
270 kstat_named_t arcstat_l2_read_bytes;
271 kstat_named_t arcstat_l2_write_bytes;
272 kstat_named_t arcstat_l2_writes_sent;
273 kstat_named_t arcstat_l2_writes_done;
274 kstat_named_t arcstat_l2_writes_error;
275 kstat_named_t arcstat_l2_writes_hdr_miss;
276 kstat_named_t arcstat_l2_evict_lock_retry;
277 kstat_named_t arcstat_l2_evict_reading;
278 kstat_named_t arcstat_l2_free_on_write;
279 kstat_named_t arcstat_l2_abort_lowmem;
280 kstat_named_t arcstat_l2_cksum_bad;
281 kstat_named_t arcstat_l2_io_error;
282 kstat_named_t arcstat_l2_size;
283 kstat_named_t arcstat_l2_hdr_size;
284 kstat_named_t arcstat_memory_throttle_count;
285 } arc_stats_t;
286
287 static arc_stats_t arc_stats = {
288 { "hits", KSTAT_DATA_UINT64 },
289 { "misses", KSTAT_DATA_UINT64 },
290 { "demand_data_hits", KSTAT_DATA_UINT64 },
291 { "demand_data_misses", KSTAT_DATA_UINT64 },
292 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
293 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
294 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
295 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
296 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
297 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
298 { "mru_hits", KSTAT_DATA_UINT64 },
299 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
300 { "mfu_hits", KSTAT_DATA_UINT64 },
301 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
302 { "deleted", KSTAT_DATA_UINT64 },
303 { "recycle_miss", KSTAT_DATA_UINT64 },
304 { "mutex_miss", KSTAT_DATA_UINT64 },
305 { "evict_skip", KSTAT_DATA_UINT64 },
306 { "evict_l2_cached", KSTAT_DATA_UINT64 },
307 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
308 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
309 { "hash_elements", KSTAT_DATA_UINT64 },
310 { "hash_elements_max", KSTAT_DATA_UINT64 },
311 { "hash_collisions", KSTAT_DATA_UINT64 },
312 { "hash_chains", KSTAT_DATA_UINT64 },
313 { "hash_chain_max", KSTAT_DATA_UINT64 },
314 { "p", KSTAT_DATA_UINT64 },
315 { "c", KSTAT_DATA_UINT64 },
316 { "c_min", KSTAT_DATA_UINT64 },
317 { "c_max", KSTAT_DATA_UINT64 },
318 { "size", KSTAT_DATA_UINT64 },
319 { "hdr_size", KSTAT_DATA_UINT64 },
320 { "data_size", KSTAT_DATA_UINT64 },
321 { "other_size", KSTAT_DATA_UINT64 },
322 { "l2_hits", KSTAT_DATA_UINT64 },
323 { "l2_misses", KSTAT_DATA_UINT64 },
324 { "l2_feeds", KSTAT_DATA_UINT64 },
325 { "l2_rw_clash", KSTAT_DATA_UINT64 },
326 { "l2_read_bytes", KSTAT_DATA_UINT64 },
327 { "l2_write_bytes", KSTAT_DATA_UINT64 },
328 { "l2_writes_sent", KSTAT_DATA_UINT64 },
329 { "l2_writes_done", KSTAT_DATA_UINT64 },
330 { "l2_writes_error", KSTAT_DATA_UINT64 },
331 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
332 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
333 { "l2_evict_reading", KSTAT_DATA_UINT64 },
334 { "l2_free_on_write", KSTAT_DATA_UINT64 },
335 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
336 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
337 { "l2_io_error", KSTAT_DATA_UINT64 },
338 { "l2_size", KSTAT_DATA_UINT64 },
339 { "l2_hdr_size", KSTAT_DATA_UINT64 },
340 { "memory_throttle_count", KSTAT_DATA_UINT64 }
341 };
342
343 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
344
345 #define ARCSTAT_INCR(stat, val) \
346 atomic_add_64(&arc_stats.stat.value.ui64, (val));
347
348 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
349 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
350
351 #define ARCSTAT_MAX(stat, val) { \
352 uint64_t m; \
353 while ((val) > (m = arc_stats.stat.value.ui64) && \
354 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
355 continue; \
356 }
357
358 #define ARCSTAT_MAXSTAT(stat) \
359 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
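
/*
 * Usage sketch (illustrative): callers update the kstats through these
 * wrappers rather than touching arc_stats directly, e.g. when
 * buf_hash_insert() below records a hash chain collision:
 *
 *	ARCSTAT_BUMP(arcstat_hash_collisions);
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);
 *
 * ARCSTAT_MAX() loops on atomic_cas_64(), so running maximums are
 * maintained without taking any lock.
 */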
360
361 /*
362 * We define a macro to allow ARC hits/misses to be easily broken down by
363 * two separate conditions, giving a total of four different subtypes for
364 * each of hits and misses (so eight statistics total).
365 */
366 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
367 if (cond1) { \
368 if (cond2) { \
369 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
370 } else { \
371 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
372 } \
373 } else { \
374 if (cond2) { \
375 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
376 } else { \
377 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
378 } \
379 }
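
/*
 * Example expansion (illustrative): recording a cache hit broken down
 * by demand vs. prefetch and data vs. metadata looks like this (see
 * arc_buf_add_ref() below):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */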
380
381 kstat_t *arc_ksp;
382 static arc_state_t *arc_anon;
383 static arc_state_t *arc_mru;
384 static arc_state_t *arc_mru_ghost;
385 static arc_state_t *arc_mfu;
386 static arc_state_t *arc_mfu_ghost;
387 static arc_state_t *arc_l2c_only;
388
389 /*
390 * There are several ARC variables that are critical to export as kstats --
391 * but we don't want to have to grovel around in the kstat whenever we wish to
392 * manipulate them. For these variables, we therefore define them to be in
393 * terms of the statistic variable. This assures that we are not introducing
394 * the possibility of inconsistency by having shadow copies of the variables,
395 * while still allowing the code to be readable.
396 */
397 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
398 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
399 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
400 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
401 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
402
403 static int arc_no_grow; /* Don't try to grow cache size */
404 static uint64_t arc_tempreserve;
405 static uint64_t arc_loaned_bytes;
406 static uint64_t arc_meta_used;
407 static uint64_t arc_meta_limit;
408 static uint64_t arc_meta_max = 0;
409
410 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
411
412 typedef struct arc_callback arc_callback_t;
413
414 struct arc_callback {
415 void *acb_private;
416 arc_done_func_t *acb_done;
417 arc_buf_t *acb_buf;
418 zio_t *acb_zio_dummy;
419 arc_callback_t *acb_next;
420 };
421
422 typedef struct arc_write_callback arc_write_callback_t;
423
424 struct arc_write_callback {
425 void *awcb_private;
426 arc_done_func_t *awcb_ready;
427 arc_done_func_t *awcb_done;
428 arc_buf_t *awcb_buf;
429 };
430
431 struct arc_buf_hdr {
432 /* protected by hash lock */
433 dva_t b_dva;
434 uint64_t b_birth;
435 uint64_t b_cksum0;
436
437 kmutex_t b_freeze_lock;
438 zio_cksum_t *b_freeze_cksum;
439 void *b_thawed;
440
441 arc_buf_hdr_t *b_hash_next;
442 arc_buf_t *b_buf;
443 uint32_t b_flags;
444 uint32_t b_datacnt;
445
446 arc_callback_t *b_acb;
447 kcondvar_t b_cv;
448
449 /* immutable */
450 arc_buf_contents_t b_type;
451 uint64_t b_size;
452 uint64_t b_spa;
453
454 /* protected by arc state mutex */
455 arc_state_t *b_state;
456 list_node_t b_arc_node;
457
458 /* updated atomically */
459 clock_t b_arc_access;
460
461 /* self protecting */
462 refcount_t b_refcnt;
463
464 l2arc_buf_hdr_t *b_l2hdr;
465 list_node_t b_l2node;
466 };
467
468 static arc_buf_t *arc_eviction_list;
469 static kmutex_t arc_eviction_mtx;
470 static arc_buf_hdr_t arc_eviction_hdr;
471 static void arc_get_data_buf(arc_buf_t *buf);
472 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
473 static int arc_evict_needed(arc_buf_contents_t type);
474 static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
475
476 static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
477
478 #define GHOST_STATE(state) \
479 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
480 (state) == arc_l2c_only)
481
482 /*
483 * Private ARC flags. These flags are private ARC only flags that will show up
484 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
485 * be passed in as arc_flags in things like arc_read. However, these flags
486 * should never be passed and should only be set by ARC code. When adding new
487 * public flags, make sure not to smash the private ones.
488 */
489
490 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
491 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
492 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
493 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
494 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
495 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
496 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
497 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
498 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
499 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
500
501 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
502 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
503 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
504 #define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
505 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
506 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
507 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
508 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
509 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
510 (hdr)->b_l2hdr != NULL)
511 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
512 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
513 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
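
/*
 * Illustrative sketch: the private flags are set directly on b_flags
 * and tested through the HDR_* wrappers, for example:
 *
 *	buf->b_flags |= ARC_IN_HASH_TABLE;	(buf_hash_insert())
 *	hdr->b_flags |= ARC_BUF_AVAILABLE;	(arc_buf_free())
 *	ab->b_flags &= ~ARC_PREFETCH;		(add_reference())
 */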
514
515 /*
516 * Other sizes
517 */
518
519 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
520 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
521
522 /*
523 * Hash table routines
524 */
525
526 #define HT_LOCK_PAD 64
527
528 struct ht_lock {
529 kmutex_t ht_lock;
530 #ifdef _KERNEL
531 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
532 #endif
533 };
534
535 #define BUF_LOCKS 256
536 typedef struct buf_hash_table {
537 uint64_t ht_mask;
538 arc_buf_hdr_t **ht_table;
539 struct ht_lock ht_locks[BUF_LOCKS];
540 } buf_hash_table_t;
541
542 static buf_hash_table_t buf_hash_table;
543
544 #define BUF_HASH_INDEX(spa, dva, birth) \
545 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
546 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
547 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
548 #define HDR_LOCK(hdr) \
549 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
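
/*
 * Illustrative sketch: a buffer's hash lock is derived entirely from
 * its on-disk identity (spa, dva, birth), so a caller holding only
 * the header can take the correct bucket lock:
 *
 *	hash_lock = HDR_LOCK(buf->b_hdr);
 *	mutex_enter(hash_lock);
 *	...
 *	mutex_exit(hash_lock);
 *
 * (see arc_buf_freeze() below for this pattern).
 */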
550
551 uint64_t zfs_crc64_table[256];
552
553 /*
554 * Level 2 ARC
555 */
556
557 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
558 #define L2ARC_HEADROOM 2 /* num of writes */
559 #define L2ARC_FEED_SECS 1 /* caching interval secs */
560 #define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
561
562 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
563 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
564
565 /*
566 * L2ARC Performance Tunables
567 */
568 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
569 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
570 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
571 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
572 uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
573 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
574 boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
575 boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
576
577 /*
578 * L2ARC Internals
579 */
580 typedef struct l2arc_dev {
581 vdev_t *l2ad_vdev; /* vdev */
582 spa_t *l2ad_spa; /* spa */
583 uint64_t l2ad_hand; /* next write location */
584 uint64_t l2ad_write; /* desired write size, bytes */
585 uint64_t l2ad_boost; /* warmup write boost, bytes */
586 uint64_t l2ad_start; /* first addr on device */
587 uint64_t l2ad_end; /* last addr on device */
588 uint64_t l2ad_evict; /* last addr eviction reached */
589 boolean_t l2ad_first; /* first sweep through */
590 boolean_t l2ad_writing; /* currently writing */
591 list_t *l2ad_buflist; /* buffer list */
592 list_node_t l2ad_node; /* device list node */
593 } l2arc_dev_t;
594
595 static list_t L2ARC_dev_list; /* device list */
596 static list_t *l2arc_dev_list; /* device list pointer */
597 static kmutex_t l2arc_dev_mtx; /* device list mutex */
598 static l2arc_dev_t *l2arc_dev_last; /* last device used */
599 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
600 static list_t L2ARC_free_on_write; /* free after write buf list */
601 static list_t *l2arc_free_on_write; /* free after write list ptr */
602 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
603 static uint64_t l2arc_ndev; /* number of devices */
604
605 typedef struct l2arc_read_callback {
606 arc_buf_t *l2rcb_buf; /* read buffer */
607 spa_t *l2rcb_spa; /* spa */
608 blkptr_t l2rcb_bp; /* original blkptr */
609 zbookmark_t l2rcb_zb; /* original bookmark */
610 int l2rcb_flags; /* original flags */
611 } l2arc_read_callback_t;
612
613 typedef struct l2arc_write_callback {
614 l2arc_dev_t *l2wcb_dev; /* device info */
615 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
616 } l2arc_write_callback_t;
617
618 struct l2arc_buf_hdr {
619 /* protected by arc_buf_hdr mutex */
620 l2arc_dev_t *b_dev; /* L2ARC device */
621 uint64_t b_daddr; /* disk address, offset byte */
622 };
623
624 typedef struct l2arc_data_free {
625 /* protected by l2arc_free_on_write_mtx */
626 void *l2df_data;
627 size_t l2df_size;
628 void (*l2df_func)(void *, size_t);
629 list_node_t l2df_list_node;
630 } l2arc_data_free_t;
631
632 static kmutex_t l2arc_feed_thr_lock;
633 static kcondvar_t l2arc_feed_thr_cv;
634 static uint8_t l2arc_thread_exit;
635
636 static void l2arc_read_done(zio_t *zio);
637 static void l2arc_hdr_stat_add(void);
638 static void l2arc_hdr_stat_remove(void);
639
640 static uint64_t
641 buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
642 {
643 uint8_t *vdva = (uint8_t *)dva;
644 uint64_t crc = -1ULL;
645 int i;
646
647 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
648
649 for (i = 0; i < sizeof (dva_t); i++)
650 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
651
652 crc ^= (spa>>8) ^ birth;
653
654 return (crc);
655 }
656
657 #define BUF_EMPTY(buf) \
658 ((buf)->b_dva.dva_word[0] == 0 && \
659 (buf)->b_dva.dva_word[1] == 0 && \
660 (buf)->b_birth == 0)
661
662 #define BUF_EQUAL(spa, dva, birth, buf) \
663 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
664 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
665 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
666
667 static void
668 buf_discard_identity(arc_buf_hdr_t *hdr)
669 {
670 hdr->b_dva.dva_word[0] = 0;
671 hdr->b_dva.dva_word[1] = 0;
672 hdr->b_birth = 0;
673 hdr->b_cksum0 = 0;
674 }
675
676 static arc_buf_hdr_t *
677 buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
678 {
679 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
680 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
681 arc_buf_hdr_t *buf;
682
683 mutex_enter(hash_lock);
684 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
685 buf = buf->b_hash_next) {
686 if (BUF_EQUAL(spa, dva, birth, buf)) {
687 *lockp = hash_lock;
688 return (buf);
689 }
690 }
691 mutex_exit(hash_lock);
692 *lockp = NULL;
693 return (NULL);
694 }
695
696 /*
697 * Insert an entry into the hash table. If there is already an element
698 * equal to elem in the hash table, then the already existing element
699 * will be returned and the new element will not be inserted.
700 * Otherwise returns NULL.
701 */
702 static arc_buf_hdr_t *
703 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
704 {
705 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
706 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
707 arc_buf_hdr_t *fbuf;
708 uint32_t i;
709
710 ASSERT(!HDR_IN_HASH_TABLE(buf));
711 *lockp = hash_lock;
712 mutex_enter(hash_lock);
713 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
714 fbuf = fbuf->b_hash_next, i++) {
715 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
716 return (fbuf);
717 }
718
719 buf->b_hash_next = buf_hash_table.ht_table[idx];
720 buf_hash_table.ht_table[idx] = buf;
721 buf->b_flags |= ARC_IN_HASH_TABLE;
722
723 /* collect some hash table performance data */
724 if (i > 0) {
725 ARCSTAT_BUMP(arcstat_hash_collisions);
726 if (i == 1)
727 ARCSTAT_BUMP(arcstat_hash_chains);
728
729 ARCSTAT_MAX(arcstat_hash_chain_max, i);
730 }
731
732 ARCSTAT_BUMP(arcstat_hash_elements);
733 ARCSTAT_MAXSTAT(arcstat_hash_elements);
734
735 return (NULL);
736 }
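
/*
 * Usage sketch (hypothetical caller, illustration only):
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... another thread inserted the same identity first;
 *		... use the existing header instead of hdr
 *	}
 *	...
 *	mutex_exit(hash_lock);
 *
 * Note that the hash lock is returned held whether or not the insert
 * succeeded; the caller is responsible for dropping it.
 */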
737
738 static void
739 buf_hash_remove(arc_buf_hdr_t *buf)
740 {
741 arc_buf_hdr_t *fbuf, **bufp;
742 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
743
744 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
745 ASSERT(HDR_IN_HASH_TABLE(buf));
746
747 bufp = &buf_hash_table.ht_table[idx];
748 while ((fbuf = *bufp) != buf) {
749 ASSERT(fbuf != NULL);
750 bufp = &fbuf->b_hash_next;
751 }
752 *bufp = buf->b_hash_next;
753 buf->b_hash_next = NULL;
754 buf->b_flags &= ~ARC_IN_HASH_TABLE;
755
756 /* collect some hash table performance data */
757 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
758
759 if (buf_hash_table.ht_table[idx] &&
760 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
761 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
762 }
763
764 /*
765 * Global data structures and functions for the buf kmem cache.
766 */
767 static kmem_cache_t *hdr_cache;
768 static kmem_cache_t *buf_cache;
769
770 static void
771 buf_fini(void)
772 {
773 int i;
774
775 kmem_free(buf_hash_table.ht_table,
776 (buf_hash_table.ht_mask + 1) * sizeof (void *));
777 for (i = 0; i < BUF_LOCKS; i++)
778 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
779 kmem_cache_destroy(hdr_cache);
780 kmem_cache_destroy(buf_cache);
781 }
782
783 /*
784 * Constructor callback - called when the cache is empty
785 * and a new buf is requested.
786 */
787 /* ARGSUSED */
788 static int
789 hdr_cons(void *vbuf, void *unused, int kmflag)
790 {
791 arc_buf_hdr_t *buf = vbuf;
792
793 bzero(buf, sizeof (arc_buf_hdr_t));
794 refcount_create(&buf->b_refcnt);
795 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
796 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
797 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
798
799 return (0);
800 }
801
802 /* ARGSUSED */
803 static int
804 buf_cons(void *vbuf, void *unused, int kmflag)
805 {
806 arc_buf_t *buf = vbuf;
807
808 bzero(buf, sizeof (arc_buf_t));
809 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
810 rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
811 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
812
813 return (0);
814 }
815
816 /*
817 * Destructor callback - called when a cached buf is
818 * no longer required.
819 */
820 /* ARGSUSED */
821 static void
822 hdr_dest(void *vbuf, void *unused)
823 {
824 arc_buf_hdr_t *buf = vbuf;
825
826 ASSERT(BUF_EMPTY(buf));
827 refcount_destroy(&buf->b_refcnt);
828 cv_destroy(&buf->b_cv);
829 mutex_destroy(&buf->b_freeze_lock);
830 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
831 }
832
833 /* ARGSUSED */
834 static void
835 buf_dest(void *vbuf, void *unused)
836 {
837 arc_buf_t *buf = vbuf;
838
839 mutex_destroy(&buf->b_evict_lock);
840 rw_destroy(&buf->b_data_lock);
841 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
842 }
843
844 /*
845 * Reclaim callback -- invoked when memory is low.
846 */
847 /* ARGSUSED */
848 static void
849 hdr_recl(void *unused)
850 {
851 dprintf("hdr_recl called\n");
852 /*
853 * umem calls the reclaim func when we destroy the buf cache,
854 * which is after we do arc_fini().
855 */
856 if (!arc_dead)
857 cv_signal(&arc_reclaim_thr_cv);
858 }
859
860 static void
861 buf_init(void)
862 {
863 uint64_t *ct;
864 uint64_t hsize = 1ULL << 12;
865 int i, j;
866
867 /*
868 * The hash table is big enough to fill all of physical memory
869 * with an average 64K block size. The table will take up
870 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
871 */
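/*
 * Worked example (illustrative): with 4GB of physical memory and
 * 8-byte pointers the loop below settles on hsize = 4GB / 64K =
 * 65536 slots, i.e. a 512KB table -- the 128KB per GB noted above.
 */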
872 while (hsize * 65536 < physmem * PAGESIZE)
873 hsize <<= 1;
874 retry:
875 buf_hash_table.ht_mask = hsize - 1;
876 buf_hash_table.ht_table =
877 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
878 if (buf_hash_table.ht_table == NULL) {
879 ASSERT(hsize > (1ULL << 8));
880 hsize >>= 1;
881 goto retry;
882 }
883
884 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
885 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
886 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
887 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
888
889 for (i = 0; i < 256; i++)
890 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
891 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
892
893 for (i = 0; i < BUF_LOCKS; i++) {
894 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
895 NULL, MUTEX_DEFAULT, NULL);
896 }
897 }
898
899 #define ARC_MINTIME (hz>>4) /* 62 ms */
900
901 static void
902 arc_cksum_verify(arc_buf_t *buf)
903 {
904 zio_cksum_t zc;
905
906 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
907 return;
908
909 mutex_enter(&buf->b_hdr->b_freeze_lock);
910 if (buf->b_hdr->b_freeze_cksum == NULL ||
911 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
912 mutex_exit(&buf->b_hdr->b_freeze_lock);
913 return;
914 }
915 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
916 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
917 panic("buffer modified while frozen!");
918 mutex_exit(&buf->b_hdr->b_freeze_lock);
919 }
920
921 static int
922 arc_cksum_equal(arc_buf_t *buf)
923 {
924 zio_cksum_t zc;
925 int equal;
926
927 mutex_enter(&buf->b_hdr->b_freeze_lock);
928 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
929 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
930 mutex_exit(&buf->b_hdr->b_freeze_lock);
931
932 return (equal);
933 }
934
935 static void
936 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
937 {
938 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
939 return;
940
941 mutex_enter(&buf->b_hdr->b_freeze_lock);
942 if (buf->b_hdr->b_freeze_cksum != NULL) {
943 mutex_exit(&buf->b_hdr->b_freeze_lock);
944 return;
945 }
946 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
947 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
948 buf->b_hdr->b_freeze_cksum);
949 mutex_exit(&buf->b_hdr->b_freeze_lock);
950 }
951
952 void
953 arc_buf_thaw(arc_buf_t *buf)
954 {
955 if (zfs_flags & ZFS_DEBUG_MODIFY) {
956 if (buf->b_hdr->b_state != arc_anon)
957 panic("modifying non-anon buffer!");
958 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
959 panic("modifying buffer while i/o in progress!");
960 arc_cksum_verify(buf);
961 }
962
963 mutex_enter(&buf->b_hdr->b_freeze_lock);
964 if (buf->b_hdr->b_freeze_cksum != NULL) {
965 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
966 buf->b_hdr->b_freeze_cksum = NULL;
967 }
968
969 if (zfs_flags & ZFS_DEBUG_MODIFY) {
970 if (buf->b_hdr->b_thawed)
971 kmem_free(buf->b_hdr->b_thawed, 1);
972 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
973 }
974
975 mutex_exit(&buf->b_hdr->b_freeze_lock);
976 }
977
978 void
979 arc_buf_freeze(arc_buf_t *buf)
980 {
981 kmutex_t *hash_lock;
982
983 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
984 return;
985
986 hash_lock = HDR_LOCK(buf->b_hdr);
987 mutex_enter(hash_lock);
988
989 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
990 buf->b_hdr->b_state == arc_anon);
991 arc_cksum_compute(buf, B_FALSE);
992 mutex_exit(hash_lock);
993 }
994
995 static void
996 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
997 {
998 ASSERT(MUTEX_HELD(hash_lock));
999
1000 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1001 (ab->b_state != arc_anon)) {
1002 uint64_t delta = ab->b_size * ab->b_datacnt;
1003 list_t *list = &ab->b_state->arcs_list[ab->b_type];
1004 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1005
1006 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
1007 mutex_enter(&ab->b_state->arcs_mtx);
1008 ASSERT(list_link_active(&ab->b_arc_node));
1009 list_remove(list, ab);
1010 if (GHOST_STATE(ab->b_state)) {
1011 ASSERT3U(ab->b_datacnt, ==, 0);
1012 ASSERT3P(ab->b_buf, ==, NULL);
1013 delta = ab->b_size;
1014 }
1015 ASSERT(delta > 0);
1016 ASSERT3U(*size, >=, delta);
1017 atomic_add_64(size, -delta);
1018 mutex_exit(&ab->b_state->arcs_mtx);
1019 /* remove the prefetch flag if we get a reference */
1020 if (ab->b_flags & ARC_PREFETCH)
1021 ab->b_flags &= ~ARC_PREFETCH;
1022 }
1023 }
1024
1025 static int
1026 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1027 {
1028 int cnt;
1029 arc_state_t *state = ab->b_state;
1030
1031 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1032 ASSERT(!GHOST_STATE(state));
1033
1034 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1035 (state != arc_anon)) {
1036 uint64_t *size = &state->arcs_lsize[ab->b_type];
1037
1038 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
1039 mutex_enter(&state->arcs_mtx);
1040 ASSERT(!list_link_active(&ab->b_arc_node));
1041 list_insert_head(&state->arcs_list[ab->b_type], ab);
1042 ASSERT(ab->b_datacnt > 0);
1043 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1044 mutex_exit(&state->arcs_mtx);
1045 }
1046 return (cnt);
1047 }
1048
1049 /*
1050 * Move the supplied buffer to the indicated state. The mutex
1051 * for the buffer must be held by the caller.
1052 */
1053 static void
1054 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1055 {
1056 arc_state_t *old_state = ab->b_state;
1057 int64_t refcnt = refcount_count(&ab->b_refcnt);
1058 uint64_t from_delta, to_delta;
1059
1060 ASSERT(MUTEX_HELD(hash_lock));
1061 ASSERT(new_state != old_state);
1062 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1063 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1064 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1065
1066 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1067
1068 /*
1069 * If this buffer is evictable, transfer it from the
1070 * old state list to the new state list.
1071 */
1072 if (refcnt == 0) {
1073 if (old_state != arc_anon) {
1074 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1075 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1076
1077 if (use_mutex)
1078 mutex_enter(&old_state->arcs_mtx);
1079
1080 ASSERT(list_link_active(&ab->b_arc_node));
1081 list_remove(&old_state->arcs_list[ab->b_type], ab);
1082
1083 /*
1084 * If prefetching out of the ghost cache,
1085 * we will have a non-zero datacnt.
1086 */
1087 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1088 /* ghost elements have a ghost size */
1089 ASSERT(ab->b_buf == NULL);
1090 from_delta = ab->b_size;
1091 }
1092 ASSERT3U(*size, >=, from_delta);
1093 atomic_add_64(size, -from_delta);
1094
1095 if (use_mutex)
1096 mutex_exit(&old_state->arcs_mtx);
1097 }
1098 if (new_state != arc_anon) {
1099 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1100 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1101
1102 if (use_mutex)
1103 mutex_enter(&new_state->arcs_mtx);
1104
1105 list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1106
1107 /* ghost elements have a ghost size */
1108 if (GHOST_STATE(new_state)) {
1109 ASSERT(ab->b_datacnt == 0);
1110 ASSERT(ab->b_buf == NULL);
1111 to_delta = ab->b_size;
1112 }
1113 atomic_add_64(size, to_delta);
1114
1115 if (use_mutex)
1116 mutex_exit(&new_state->arcs_mtx);
1117 }
1118 }
1119
1120 ASSERT(!BUF_EMPTY(ab));
1121 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1122 buf_hash_remove(ab);
1123
1124 /* adjust state sizes */
1125 if (to_delta)
1126 atomic_add_64(&new_state->arcs_size, to_delta);
1127 if (from_delta) {
1128 ASSERT3U(old_state->arcs_size, >=, from_delta);
1129 atomic_add_64(&old_state->arcs_size, -from_delta);
1130 }
1131 ab->b_state = new_state;
1132
1133 /* adjust l2arc hdr stats */
1134 if (new_state == arc_l2c_only)
1135 l2arc_hdr_stat_add();
1136 else if (old_state == arc_l2c_only)
1137 l2arc_hdr_stat_remove();
1138 }
1139
1140 void
1141 arc_space_consume(uint64_t space, arc_space_type_t type)
1142 {
1143 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1144
1145 switch (type) {
1146 case ARC_SPACE_DATA:
1147 ARCSTAT_INCR(arcstat_data_size, space);
1148 break;
1149 case ARC_SPACE_OTHER:
1150 ARCSTAT_INCR(arcstat_other_size, space);
1151 break;
1152 case ARC_SPACE_HDRS:
1153 ARCSTAT_INCR(arcstat_hdr_size, space);
1154 break;
1155 case ARC_SPACE_L2HDRS:
1156 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1157 break;
1158 }
1159
1160 atomic_add_64(&arc_meta_used, space);
1161 atomic_add_64(&arc_size, space);
1162 }
1163
1164 void
1165 arc_space_return(uint64_t space, arc_space_type_t type)
1166 {
1167 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1168
1169 switch (type) {
1170 case ARC_SPACE_DATA:
1171 ARCSTAT_INCR(arcstat_data_size, -space);
1172 break;
1173 case ARC_SPACE_OTHER:
1174 ARCSTAT_INCR(arcstat_other_size, -space);
1175 break;
1176 case ARC_SPACE_HDRS:
1177 ARCSTAT_INCR(arcstat_hdr_size, -space);
1178 break;
1179 case ARC_SPACE_L2HDRS:
1180 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1181 break;
1182 }
1183
1184 ASSERT(arc_meta_used >= space);
1185 if (arc_meta_max < arc_meta_used)
1186 arc_meta_max = arc_meta_used;
1187 atomic_add_64(&arc_meta_used, -space);
1188 ASSERT(arc_size >= space);
1189 atomic_add_64(&arc_size, -space);
1190 }
1191
1192 void *
1193 arc_data_buf_alloc(uint64_t size)
1194 {
1195 if (arc_evict_needed(ARC_BUFC_DATA))
1196 cv_signal(&arc_reclaim_thr_cv);
1197 atomic_add_64(&arc_size, size);
1198 return (zio_data_buf_alloc(size));
1199 }
1200
1201 void
1202 arc_data_buf_free(void *buf, uint64_t size)
1203 {
1204 zio_data_buf_free(buf, size);
1205 ASSERT(arc_size >= size);
1206 atomic_add_64(&arc_size, -size);
1207 }
1208
1209 arc_buf_t *
1210 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1211 {
1212 arc_buf_hdr_t *hdr;
1213 arc_buf_t *buf;
1214
1215 ASSERT3U(size, >, 0);
1216 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1217 ASSERT(BUF_EMPTY(hdr));
1218 hdr->b_size = size;
1219 hdr->b_type = type;
1220 hdr->b_spa = spa_guid(spa);
1221 hdr->b_state = arc_anon;
1222 hdr->b_arc_access = 0;
1223 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1224 buf->b_hdr = hdr;
1225 buf->b_data = NULL;
1226 buf->b_efunc = NULL;
1227 buf->b_private = NULL;
1228 buf->b_next = NULL;
1229 hdr->b_buf = buf;
1230 arc_get_data_buf(buf);
1231 hdr->b_datacnt = 1;
1232 hdr->b_flags = 0;
1233 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1234 (void) refcount_add(&hdr->b_refcnt, tag);
1235
1236 return (buf);
1237 }
1238
1239 static char *arc_onloan_tag = "onloan";
1240
1241 /*
1242 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1243 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1244 * buffers must be returned to the arc before they can be used by the DMU or
1245 * freed.
1246 */
1247 arc_buf_t *
1248 arc_loan_buf(spa_t *spa, int size)
1249 {
1250 arc_buf_t *buf;
1251
1252 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1253
1254 atomic_add_64(&arc_loaned_bytes, size);
1255 return (buf);
1256 }
1257
1258 /*
1259 * Return a loaned arc buffer to the arc.
1260 */
1261 void
1262 arc_return_buf(arc_buf_t *buf, void *tag)
1263 {
1264 arc_buf_hdr_t *hdr = buf->b_hdr;
1265
1266 ASSERT(buf->b_data != NULL);
1267 (void) refcount_add(&hdr->b_refcnt, tag);
1268 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1269
1270 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1271 }
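
/*
 * Usage sketch (hypothetical caller, illustration only): a loaned
 * buffer is tagged with arc_onloan_tag and must be handed back (or
 * attached to a new owner) before it can be used by the DMU or freed:
 *
 *	arc_buf_t *abuf = arc_loan_buf(spa, size);
 *	... fill abuf->b_data ...
 *	arc_return_buf(abuf, my_tag);		(my_tag is hypothetical)
 */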
1272
1273 /* Detach an arc_buf from a dbuf (tag) */
1274 void
1275 arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1276 {
1277 arc_buf_hdr_t *hdr;
1278
1279 ASSERT(buf->b_data != NULL);
1280 hdr = buf->b_hdr;
1281 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1282 (void) refcount_remove(&hdr->b_refcnt, tag);
1283 buf->b_efunc = NULL;
1284 buf->b_private = NULL;
1285
1286 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1287 }
1288
1289 static arc_buf_t *
1290 arc_buf_clone(arc_buf_t *from)
1291 {
1292 arc_buf_t *buf;
1293 arc_buf_hdr_t *hdr = from->b_hdr;
1294 uint64_t size = hdr->b_size;
1295
1296 ASSERT(hdr->b_state != arc_anon);
1297
1298 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1299 buf->b_hdr = hdr;
1300 buf->b_data = NULL;
1301 buf->b_efunc = NULL;
1302 buf->b_private = NULL;
1303 buf->b_next = hdr->b_buf;
1304 hdr->b_buf = buf;
1305 arc_get_data_buf(buf);
1306 bcopy(from->b_data, buf->b_data, size);
1307 hdr->b_datacnt += 1;
1308 return (buf);
1309 }
1310
1311 void
1312 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1313 {
1314 arc_buf_hdr_t *hdr;
1315 kmutex_t *hash_lock;
1316
1317 /*
1318 * Check to see if this buffer is evicted. Callers
1319 * must verify b_data != NULL to know if the add_ref
1320 * was successful.
1321 */
1322 mutex_enter(&buf->b_evict_lock);
1323 if (buf->b_data == NULL) {
1324 mutex_exit(&buf->b_evict_lock);
1325 return;
1326 }
1327 hash_lock = HDR_LOCK(buf->b_hdr);
1328 mutex_enter(hash_lock);
1329 hdr = buf->b_hdr;
1330 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1331 mutex_exit(&buf->b_evict_lock);
1332
1333 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1334 add_reference(hdr, hash_lock, tag);
1335 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1336 arc_access(hdr, hash_lock);
1337 mutex_exit(hash_lock);
1338 ARCSTAT_BUMP(arcstat_hits);
1339 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1340 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1341 data, metadata, hits);
1342 }
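
/*
 * Usage sketch (hypothetical caller, illustration only): because the
 * buffer may already have been evicted, callers check b_data after
 * adding the reference, as the comment above requires:
 *
 *	arc_buf_add_ref(buf, my_tag);		(my_tag is hypothetical)
 *	if (buf->b_data == NULL) {
 *		... the buffer was evicted; fall back to a fresh read ...
 *	}
 */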
1343
1344 /*
1345 * Free the arc data buffer. If it is an l2arc write in progress,
1346 * the buffer is placed on l2arc_free_on_write to be freed later.
1347 */
1348 static void
1349 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
1350 void *data, size_t size)
1351 {
1352 if (HDR_L2_WRITING(hdr)) {
1353 l2arc_data_free_t *df;
1354 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1355 df->l2df_data = data;
1356 df->l2df_size = size;
1357 df->l2df_func = free_func;
1358 mutex_enter(&l2arc_free_on_write_mtx);
1359 list_insert_head(l2arc_free_on_write, df);
1360 mutex_exit(&l2arc_free_on_write_mtx);
1361 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1362 } else {
1363 free_func(data, size);
1364 }
1365 }
1366
1367 static void
1368 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1369 {
1370 arc_buf_t **bufp;
1371
1372 /* free up data associated with the buf */
1373 if (buf->b_data) {
1374 arc_state_t *state = buf->b_hdr->b_state;
1375 uint64_t size = buf->b_hdr->b_size;
1376 arc_buf_contents_t type = buf->b_hdr->b_type;
1377
1378 arc_cksum_verify(buf);
1379
1380 if (!recycle) {
1381 if (type == ARC_BUFC_METADATA) {
1382 arc_buf_data_free(buf->b_hdr, zio_buf_free,
1383 buf->b_data, size);
1384 arc_space_return(size, ARC_SPACE_DATA);
1385 } else {
1386 ASSERT(type == ARC_BUFC_DATA);
1387 arc_buf_data_free(buf->b_hdr,
1388 zio_data_buf_free, buf->b_data, size);
1389 ARCSTAT_INCR(arcstat_data_size, -size);
1390 atomic_add_64(&arc_size, -size);
1391 }
1392 }
1393 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1394 uint64_t *cnt = &state->arcs_lsize[type];
1395
1396 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1397 ASSERT(state != arc_anon);
1398
1399 ASSERT3U(*cnt, >=, size);
1400 atomic_add_64(cnt, -size);
1401 }
1402 ASSERT3U(state->arcs_size, >=, size);
1403 atomic_add_64(&state->arcs_size, -size);
1404 buf->b_data = NULL;
1405 ASSERT(buf->b_hdr->b_datacnt > 0);
1406 buf->b_hdr->b_datacnt -= 1;
1407 }
1408
1409 /* only remove the buf if requested */
1410 if (!all)
1411 return;
1412
1413 /* remove the buf from the hdr list */
1414 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1415 continue;
1416 *bufp = buf->b_next;
1417 buf->b_next = NULL;
1418
1419 ASSERT(buf->b_efunc == NULL);
1420
1421 /* clean up the buf */
1422 buf->b_hdr = NULL;
1423 kmem_cache_free(buf_cache, buf);
1424 }
1425
1426 static void
1427 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1428 {
1429 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1430
1431 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1432 ASSERT3P(hdr->b_state, ==, arc_anon);
1433 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1434
1435 if (l2hdr != NULL) {
1436 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1437 /*
1438 * To prevent arc_free() and l2arc_evict() from
1439 * attempting to free the same buffer at the same time,
1440 * a FREE_IN_PROGRESS flag is given to arc_free() to
1441 * give it priority. l2arc_evict() can't destroy this
1442 * header while we are waiting on l2arc_buflist_mtx.
1443 *
1444 * The hdr may be removed from l2ad_buflist before we
1445 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1446 */
1447 if (!buflist_held) {
1448 mutex_enter(&l2arc_buflist_mtx);
1449 l2hdr = hdr->b_l2hdr;
1450 }
1451
1452 if (l2hdr != NULL) {
1453 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1454 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1455 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1456 if (hdr->b_state == arc_l2c_only)
1457 l2arc_hdr_stat_remove();
1458 hdr->b_l2hdr = NULL;
1459 }
1460
1461 if (!buflist_held)
1462 mutex_exit(&l2arc_buflist_mtx);
1463 }
1464
1465 if (!BUF_EMPTY(hdr)) {
1466 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1467 buf_discard_identity(hdr);
1468 }
1469 while (hdr->b_buf) {
1470 arc_buf_t *buf = hdr->b_buf;
1471
1472 if (buf->b_efunc) {
1473 mutex_enter(&arc_eviction_mtx);
1474 mutex_enter(&buf->b_evict_lock);
1475 ASSERT(buf->b_hdr != NULL);
1476 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1477 hdr->b_buf = buf->b_next;
1478 buf->b_hdr = &arc_eviction_hdr;
1479 buf->b_next = arc_eviction_list;
1480 arc_eviction_list = buf;
1481 mutex_exit(&buf->b_evict_lock);
1482 mutex_exit(&arc_eviction_mtx);
1483 } else {
1484 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1485 }
1486 }
1487 if (hdr->b_freeze_cksum != NULL) {
1488 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1489 hdr->b_freeze_cksum = NULL;
1490 }
1491 if (hdr->b_thawed) {
1492 kmem_free(hdr->b_thawed, 1);
1493 hdr->b_thawed = NULL;
1494 }
1495
1496 ASSERT(!list_link_active(&hdr->b_arc_node));
1497 ASSERT3P(hdr->b_hash_next, ==, NULL);
1498 ASSERT3P(hdr->b_acb, ==, NULL);
1499 kmem_cache_free(hdr_cache, hdr);
1500 }
1501
1502 void
1503 arc_buf_free(arc_buf_t *buf, void *tag)
1504 {
1505 arc_buf_hdr_t *hdr = buf->b_hdr;
1506 int hashed = hdr->b_state != arc_anon;
1507
1508 ASSERT(buf->b_efunc == NULL);
1509 ASSERT(buf->b_data != NULL);
1510
1511 if (hashed) {
1512 kmutex_t *hash_lock = HDR_LOCK(hdr);
1513
1514 mutex_enter(hash_lock);
1515 hdr = buf->b_hdr;
1516 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1517
1518 (void) remove_reference(hdr, hash_lock, tag);
1519 if (hdr->b_datacnt > 1) {
1520 arc_buf_destroy(buf, FALSE, TRUE);
1521 } else {
1522 ASSERT(buf == hdr->b_buf);
1523 ASSERT(buf->b_efunc == NULL);
1524 hdr->b_flags |= ARC_BUF_AVAILABLE;
1525 }
1526 mutex_exit(hash_lock);
1527 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1528 int destroy_hdr;
1529 /*
1530 * We are in the middle of an async write. Don't destroy
1531 * this buffer unless the write completes before we finish
1532 * decrementing the reference count.
1533 */
1534 mutex_enter(&arc_eviction_mtx);
1535 (void) remove_reference(hdr, NULL, tag);
1536 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1537 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1538 mutex_exit(&arc_eviction_mtx);
1539 if (destroy_hdr)
1540 arc_hdr_destroy(hdr);
1541 } else {
1542 if (remove_reference(hdr, NULL, tag) > 0)
1543 arc_buf_destroy(buf, FALSE, TRUE);
1544 else
1545 arc_hdr_destroy(hdr);
1546 }
1547 }
1548
1549 int
1550 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1551 {
1552 arc_buf_hdr_t *hdr = buf->b_hdr;
1553 kmutex_t *hash_lock = HDR_LOCK(hdr);
1554 int no_callback = (buf->b_efunc == NULL);
1555
1556 if (hdr->b_state == arc_anon) {
1557 ASSERT(hdr->b_datacnt == 1);
1558 arc_buf_free(buf, tag);
1559 return (no_callback);
1560 }
1561
1562 mutex_enter(hash_lock);
1563 hdr = buf->b_hdr;
1564 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1565 ASSERT(hdr->b_state != arc_anon);
1566 ASSERT(buf->b_data != NULL);
1567
1568 (void) remove_reference(hdr, hash_lock, tag);
1569 if (hdr->b_datacnt > 1) {
1570 if (no_callback)
1571 arc_buf_destroy(buf, FALSE, TRUE);
1572 } else if (no_callback) {
1573 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1574 ASSERT(buf->b_efunc == NULL);
1575 hdr->b_flags |= ARC_BUF_AVAILABLE;
1576 }
1577 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1578 refcount_is_zero(&hdr->b_refcnt));
1579 mutex_exit(hash_lock);
1580 return (no_callback);
1581 }
1582
1583 int
1584 arc_buf_size(arc_buf_t *buf)
1585 {
1586 return (buf->b_hdr->b_size);
1587 }
1588
1589 /*
1590 * Evict buffers from list until we've removed the specified number of
1591 * bytes. Move the removed buffers to the appropriate evict state.
1592 * If the recycle flag is set, then attempt to "recycle" a buffer:
1593 * - look for a buffer to evict that is `bytes' long.
1594 * - return the data block from this buffer rather than freeing it.
1595 * This flag is used by callers that are trying to make space for a
1596 * new buffer in a full arc cache.
1597 *
1598 * This function makes a "best effort". It skips over any buffers
1599 * it can't get a hash_lock on, and so may not catch all candidates.
1600 * It may also return without evicting as much space as requested.
1601 */
1602 static void *
1603 arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1604 arc_buf_contents_t type)
1605 {
1606 arc_state_t *evicted_state;
1607 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1608 arc_buf_hdr_t *ab, *ab_prev = NULL;
1609 list_t *list = &state->arcs_list[type];
1610 kmutex_t *hash_lock;
1611 boolean_t have_lock;
1612 void *stolen = NULL;
1613
1614 ASSERT(state == arc_mru || state == arc_mfu);
1615
1616 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1617
1618 mutex_enter(&state->arcs_mtx);
1619 mutex_enter(&evicted_state->arcs_mtx);
1620
1621 for (ab = list_tail(list); ab; ab = ab_prev) {
1622 ab_prev = list_prev(list, ab);
1623 /* prefetch buffers have a minimum lifespan */
1624 if (HDR_IO_IN_PROGRESS(ab) ||
1625 (spa && ab->b_spa != spa) ||
1626 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1627 ddi_get_lbolt() - ab->b_arc_access <
1628 arc_min_prefetch_lifespan)) {
1629 skipped++;
1630 continue;
1631 }
1632 /* "lookahead" for better eviction candidate */
1633 if (recycle && ab->b_size != bytes &&
1634 ab_prev && ab_prev->b_size == bytes)
1635 continue;
1636 hash_lock = HDR_LOCK(ab);
1637 have_lock = MUTEX_HELD(hash_lock);
1638 if (have_lock || mutex_tryenter(hash_lock)) {
1639 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1640 ASSERT(ab->b_datacnt > 0);
1641 while (ab->b_buf) {
1642 arc_buf_t *buf = ab->b_buf;
1643 if (!mutex_tryenter(&buf->b_evict_lock)) {
1644 missed += 1;
1645 break;
1646 }
1647 if (buf->b_data) {
1648 bytes_evicted += ab->b_size;
1649 if (recycle && ab->b_type == type &&
1650 ab->b_size == bytes &&
1651 !HDR_L2_WRITING(ab)) {
1652 stolen = buf->b_data;
1653 recycle = FALSE;
1654 }
1655 }
1656 if (buf->b_efunc) {
1657 mutex_enter(&arc_eviction_mtx);
1658 arc_buf_destroy(buf,
1659 buf->b_data == stolen, FALSE);
1660 ab->b_buf = buf->b_next;
1661 buf->b_hdr = &arc_eviction_hdr;
1662 buf->b_next = arc_eviction_list;
1663 arc_eviction_list = buf;
1664 mutex_exit(&arc_eviction_mtx);
1665 mutex_exit(&buf->b_evict_lock);
1666 } else {
1667 mutex_exit(&buf->b_evict_lock);
1668 arc_buf_destroy(buf,
1669 buf->b_data == stolen, TRUE);
1670 }
1671 }
1672
1673 if (ab->b_l2hdr) {
1674 ARCSTAT_INCR(arcstat_evict_l2_cached,
1675 ab->b_size);
1676 } else {
1677 if (l2arc_write_eligible(ab->b_spa, ab)) {
1678 ARCSTAT_INCR(arcstat_evict_l2_eligible,
1679 ab->b_size);
1680 } else {
1681 ARCSTAT_INCR(
1682 arcstat_evict_l2_ineligible,
1683 ab->b_size);
1684 }
1685 }
1686
1687 if (ab->b_datacnt == 0) {
1688 arc_change_state(evicted_state, ab, hash_lock);
1689 ASSERT(HDR_IN_HASH_TABLE(ab));
1690 ab->b_flags |= ARC_IN_HASH_TABLE;
1691 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1692 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1693 }
1694 if (!have_lock)
1695 mutex_exit(hash_lock);
1696 if (bytes >= 0 && bytes_evicted >= bytes)
1697 break;
1698 } else {
1699 missed += 1;
1700 }
1701 }
1702
1703 mutex_exit(&evicted_state->arcs_mtx);
1704 mutex_exit(&state->arcs_mtx);
1705
1706 if (bytes_evicted < bytes)
1707 dprintf("only evicted %lld bytes from %x",
1708 (longlong_t)bytes_evicted, state);
1709
1710 if (skipped)
1711 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1712
1713 if (missed)
1714 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1715
1716 /*
1717 * We have just evicted some data into the ghost state, so make
1718 * sure we also adjust the ghost state size if necessary.
1719 */
1720 if (arc_no_grow &&
1721 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1722 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1723 arc_mru_ghost->arcs_size - arc_c;
1724
1725 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1726 int64_t todelete =
1727 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1728 arc_evict_ghost(arc_mru_ghost, 0, todelete);
1729 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1730 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1731 arc_mru_ghost->arcs_size +
1732 arc_mfu_ghost->arcs_size - arc_c);
1733 arc_evict_ghost(arc_mfu_ghost, 0, todelete);
1734 }
1735 }
1736
1737 return (stolen);
1738 }
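
/*
 * Usage sketch (hypothetical caller, illustration only): a cache-fill
 * path that needs a `bytes'-sized data block can ask arc_evict() to
 * recycle one instead of freeing and reallocating:
 *
 *	data = arc_evict(arc_mru, 0, bytes, TRUE, ARC_BUFC_DATA);
 *	if (data == NULL)
 *		data = zio_data_buf_alloc(bytes);
 */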
1739
1740 /*
1741 * Remove buffers from list until we've removed the specified number of
1742 * bytes. Destroy the buffers that are removed.
1743 */
1744 static void
1745 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1746 {
1747 arc_buf_hdr_t *ab, *ab_prev;
1748 arc_buf_hdr_t marker;
1749 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1750 kmutex_t *hash_lock;
1751 uint64_t bytes_deleted = 0;
1752 uint64_t bufs_skipped = 0;
1753
1754 ASSERT(GHOST_STATE(state));
1755 bzero(&marker, sizeof(marker));
1756 top:
1757 mutex_enter(&state->arcs_mtx);
1758 for (ab = list_tail(list); ab; ab = ab_prev) {
1759 ab_prev = list_prev(list, ab);
1760 if (spa && ab->b_spa != spa)
1761 continue;
1762
1763 /* ignore markers */
1764 if (ab->b_spa == 0)
1765 continue;
1766
1767 hash_lock = HDR_LOCK(ab);
1768 /* caller may be trying to modify this buffer, skip it */
1769 if (MUTEX_HELD(hash_lock))
1770 continue;
1771 if (mutex_tryenter(hash_lock)) {
1772 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1773 ASSERT(ab->b_buf == NULL);
1774 ARCSTAT_BUMP(arcstat_deleted);
1775 bytes_deleted += ab->b_size;
1776
1777 if (ab->b_l2hdr != NULL) {
1778 /*
1779 * This buffer is cached on the 2nd Level ARC;
1780 * don't destroy the header.
1781 */
1782 arc_change_state(arc_l2c_only, ab, hash_lock);
1783 mutex_exit(hash_lock);
1784 } else {
1785 arc_change_state(arc_anon, ab, hash_lock);
1786 mutex_exit(hash_lock);
1787 arc_hdr_destroy(ab);
1788 }
1789
1790 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1791 if (bytes >= 0 && bytes_deleted >= bytes)
1792 break;
1793 } else if (bytes < 0) {
1794 /*
1795 * Insert a list marker and then wait for the
1796 * hash lock to become available. Once it's
1797 * available, restart from where we left off.
1798 */
1799 list_insert_after(list, ab, &marker);
1800 mutex_exit(&state->arcs_mtx);
1801 mutex_enter(hash_lock);
1802 mutex_exit(hash_lock);
1803 mutex_enter(&state->arcs_mtx);
1804 ab_prev = list_prev(list, &marker);
1805 list_remove(list, &marker);
1806 } else
1807 bufs_skipped += 1;
1808 }
1809 mutex_exit(&state->arcs_mtx);
1810
1811 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1812 (bytes < 0 || bytes_deleted < bytes)) {
1813 list = &state->arcs_list[ARC_BUFC_METADATA];
1814 goto top;
1815 }
1816
1817 if (bufs_skipped) {
1818 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1819 ASSERT(bytes >= 0);
1820 }
1821
1822 if (bytes_deleted < bytes)
1823 dprintf("only deleted %lld bytes from %p",
1824 (longlong_t)bytes_deleted, state);
1825 }
1826
1827 static void
1828 arc_adjust(void)
1829 {
1830 int64_t adjustment, delta;
1831
1832 /*
1833 * Adjust MRU size
1834 */
1835
1836 adjustment = MIN((int64_t)(arc_size - arc_c),
1837 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
1838 arc_p));
1839
1840 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1841 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1842 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
1843 adjustment -= delta;
1844 }
1845
1846 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1847 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1848 (void) arc_evict(arc_mru, 0, delta, FALSE,
1849 ARC_BUFC_METADATA);
1850 }
1851
1852 /*
1853 * Adjust MFU size
1854 */
1855
1856 adjustment = arc_size - arc_c;
1857
1858 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1859 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1860 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
1861 adjustment -= delta;
1862 }
1863
1864 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1865 int64_t delta = MIN(adjustment,
1866 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
1867 (void) arc_evict(arc_mfu, 0, delta, FALSE,
1868 ARC_BUFC_METADATA);
1869 }
1870
1871 /*
1872 * Adjust ghost lists
1873 */
1874
1875 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
1876
1877 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
1878 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
1879 arc_evict_ghost(arc_mru_ghost, 0, delta);
1880 }
1881
1882 adjustment =
1883 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
1884
1885 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
1886 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
1887 arc_evict_ghost(arc_mfu_ghost, 0, delta);
1888 }
1889 }
1890
1891 static void
1892 arc_do_user_evicts(void)
1893 {
1894 mutex_enter(&arc_eviction_mtx);
1895 while (arc_eviction_list != NULL) {
1896 arc_buf_t *buf = arc_eviction_list;
1897 arc_eviction_list = buf->b_next;
1898 mutex_enter(&buf->b_evict_lock);
1899 buf->b_hdr = NULL;
1900 mutex_exit(&buf->b_evict_lock);
1901 mutex_exit(&arc_eviction_mtx);
1902
1903 if (buf->b_efunc != NULL)
1904 VERIFY(buf->b_efunc(buf) == 0);
1905
1906 buf->b_efunc = NULL;
1907 buf->b_private = NULL;
1908 kmem_cache_free(buf_cache, buf);
1909 mutex_enter(&arc_eviction_mtx);
1910 }
1911 mutex_exit(&arc_eviction_mtx);
1912 }
1913
1914 /*
1915 * Flush all *evictable* data from the cache for the given spa.
1916 * NOTE: this will not touch "active" (i.e. referenced) data.
1917 */
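/*
 * Illustrative usage sketch (hypothetical caller, not taken from this file):
 *
 *	arc_flush(spa);		flushes one pool's evictable buffers
 *	arc_flush(NULL);	flushes every pool, as arc_fini() does below
 */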
1918 void
1919 arc_flush(spa_t *spa)
1920 {
1921 uint64_t guid = 0;
1922
1923 if (spa)
1924 guid = spa_guid(spa);
1925
1926 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1927 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
1928 if (spa)
1929 break;
1930 }
1931 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1932 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
1933 if (spa)
1934 break;
1935 }
1936 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1937 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
1938 if (spa)
1939 break;
1940 }
1941 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1942 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
1943 if (spa)
1944 break;
1945 }
1946
1947 arc_evict_ghost(arc_mru_ghost, guid, -1);
1948 arc_evict_ghost(arc_mfu_ghost, guid, -1);
1949
1950 mutex_enter(&arc_reclaim_thr_lock);
1951 arc_do_user_evicts();
1952 mutex_exit(&arc_reclaim_thr_lock);
1953 ASSERT(spa || arc_eviction_list == NULL);
1954 }
1955
1956 void
1957 arc_shrink(void)
1958 {
1959 if (arc_c > arc_c_min) {
1960 uint64_t to_free;
1961
1962 #ifdef _KERNEL
1963 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1964 #else
1965 to_free = arc_c >> arc_shrink_shift;
1966 #endif
1967 if (arc_c > arc_c_min + to_free)
1968 atomic_add_64(&arc_c, -to_free);
1969 else
1970 arc_c = arc_c_min;
1971
1972 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1973 if (arc_c > arc_size)
1974 arc_c = MAX(arc_size, arc_c_min);
1975 if (arc_p > arc_c)
1976 arc_p = (arc_c >> 1);
1977 ASSERT(arc_c >= arc_c_min);
1978 ASSERT((int64_t)arc_p >= 0);
1979 }
1980
1981 if (arc_size > arc_c)
1982 arc_adjust();
1983 }
1984
1985 static int
1986 arc_reclaim_needed(void)
1987 {
1988 uint64_t extra;
1989
1990 #ifdef _KERNEL
1991
1992 if (needfree)
1993 return (1);
1994
1995 /*
1996 * take 'desfree' extra pages, so we reclaim sooner, rather than later
1997 */
1998 extra = desfree;
1999
2000 /*
2001 * check that we're out of range of the pageout scanner. It starts to
2002 * schedule paging if freemem is less than lotsfree and needfree.
2003 * lotsfree is the high-water mark for pageout, and needfree is the
2004 * number of needed free pages. We add extra pages here to make sure
2005 * the scanner doesn't start up while we're freeing memory.
2006 */
2007 if (freemem < lotsfree + needfree + extra)
2008 return (1);
2009
2010 /*
2011 * check to make sure that swapfs has enough space so that anon
2012 * reservations can still succeed. anon_resvmem() checks that the
2013 * availrmem is greater than swapfs_minfree, and the number of reserved
2014 * swap pages. We also add a bit of extra here just to prevent
2015 * circumstances from getting really dire.
2016 */
2017 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2018 return (1);
2019
2020 #if defined(__i386)
2021 /*
2022 * If we're on an i386 platform, it's possible that we'll exhaust the
2023 * kernel heap space before we ever run out of available physical
2024 * memory. Most checks of the size of the heap_area compare against
2025 * tune.t_minarmem, which is the minimum available real memory that we
2026 * can have in the system. However, this is generally fixed at 25 pages
2027 * which is so low that it's useless. In this comparison, we seek to
2028 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2029 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2030 * free)
2031 */
2032 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2033 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2034 return (1);
2035 #endif
2036
2037 #else
2038 if (spa_get_random(100) == 0)
2039 return (1);
2040 #endif
2041 return (0);
2042 }
2043
2044 static void
2045 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2046 {
2047 size_t i;
2048 kmem_cache_t *prev_cache = NULL;
2049 kmem_cache_t *prev_data_cache = NULL;
2050 extern kmem_cache_t *zio_buf_cache[];
2051 extern kmem_cache_t *zio_data_buf_cache[];
2052
2053 #ifdef _KERNEL
2054 if (arc_meta_used >= arc_meta_limit) {
2055 /*
2056 * We are exceeding our meta-data cache limit.
2057 * Purge some DNLC entries to release holds on meta-data.
2058 */
2059 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2060 }
2061 #if defined(__i386)
2062 /*
2063 * Reclaim unused memory from all kmem caches.
2064 */
2065 kmem_reap();
2066 #endif
2067 #endif
2068
2069 /*
2070 * An aggressive reclamation will shrink the cache size as well as
2071 * reap free buffers from the arc kmem caches.
2072 */
2073 if (strat == ARC_RECLAIM_AGGR)
2074 arc_shrink();
2075
2076 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2077 if (zio_buf_cache[i] != prev_cache) {
2078 prev_cache = zio_buf_cache[i];
2079 kmem_cache_reap_now(zio_buf_cache[i]);
2080 }
2081 if (zio_data_buf_cache[i] != prev_data_cache) {
2082 prev_data_cache = zio_data_buf_cache[i];
2083 kmem_cache_reap_now(zio_data_buf_cache[i]);
2084 }
2085 }
2086 kmem_cache_reap_now(buf_cache);
2087 kmem_cache_reap_now(hdr_cache);
2088 }
2089
2090 static void
2091 arc_reclaim_thread(void)
2092 {
2093 clock_t growtime = 0;
2094 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2095 callb_cpr_t cpr;
2096
2097 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2098
2099 mutex_enter(&arc_reclaim_thr_lock);
2100 while (arc_thread_exit == 0) {
2101 if (arc_reclaim_needed()) {
2102
2103 if (arc_no_grow) {
2104 if (last_reclaim == ARC_RECLAIM_CONS) {
2105 last_reclaim = ARC_RECLAIM_AGGR;
2106 } else {
2107 last_reclaim = ARC_RECLAIM_CONS;
2108 }
2109 } else {
2110 arc_no_grow = TRUE;
2111 last_reclaim = ARC_RECLAIM_AGGR;
2112 membar_producer();
2113 }
2114
2115 /* reset the growth delay for every reclaim */
2116 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2117
2118 arc_kmem_reap_now(last_reclaim);
2119 arc_warm = B_TRUE;
2120
2121 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2122 arc_no_grow = FALSE;
2123 }
2124
2125 arc_adjust();
2126
2127 if (arc_eviction_list != NULL)
2128 arc_do_user_evicts();
2129
2130 /* block until needed, or one second, whichever is shorter */
2131 CALLB_CPR_SAFE_BEGIN(&cpr);
2132 (void) cv_timedwait(&arc_reclaim_thr_cv,
2133 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2134 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2135 }
2136
2137 arc_thread_exit = 0;
2138 cv_broadcast(&arc_reclaim_thr_cv);
2139 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2140 thread_exit();
2141 }
2142
2143 /*
2144 * Adapt arc info given the number of bytes we are trying to add and
2145 * the state that we are coming from. This function is only called
2146 * when we are adding new content to the cache.
2147 */
2148 static void
2149 arc_adapt(int bytes, arc_state_t *state)
2150 {
2151 int mult;
2152 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2153
2154 if (state == arc_l2c_only)
2155 return;
2156
2157 ASSERT(bytes > 0);
2158 /*
2159 * Adapt the target size of the MRU list:
2160 * - if we just hit in the MRU ghost list, then increase
2161 * the target size of the MRU list.
2162 * - if we just hit in the MFU ghost list, then increase
2163 * the target size of the MFU list by decreasing the
2164 * target size of the MRU list.
2165 */
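/*
 * A worked example with illustrative numbers: on an MRU ghost hit for an
 * 8K buffer while the MFU ghost list is three times the size of the MRU
 * ghost list, mult is 3 (capped at 10), so arc_p grows by 24K, bounded
 * above by arc_c - arc_p_min.  The MFU ghost case below is symmetric and
 * shrinks arc_p toward arc_p_min instead.
 */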
2166 if (state == arc_mru_ghost) {
2167 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2168 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2169 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2170
2171 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2172 } else if (state == arc_mfu_ghost) {
2173 uint64_t delta;
2174
2175 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2176 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2177 mult = MIN(mult, 10);
2178
2179 delta = MIN(bytes * mult, arc_p);
2180 arc_p = MAX(arc_p_min, arc_p - delta);
2181 }
2182 ASSERT((int64_t)arc_p >= 0);
2183
2184 if (arc_reclaim_needed()) {
2185 cv_signal(&arc_reclaim_thr_cv);
2186 return;
2187 }
2188
2189 if (arc_no_grow)
2190 return;
2191
2192 if (arc_c >= arc_c_max)
2193 return;
2194
2195 /*
2196 * If we're within (2 * maxblocksize) bytes of the target
2197 * cache size, increment the target cache size
2198 */
2199 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2200 atomic_add_64(&arc_c, (int64_t)bytes);
2201 if (arc_c > arc_c_max)
2202 arc_c = arc_c_max;
2203 else if (state == arc_anon)
2204 atomic_add_64(&arc_p, (int64_t)bytes);
2205 if (arc_p > arc_c)
2206 arc_p = arc_c;
2207 }
2208 ASSERT((int64_t)arc_p >= 0);
2209 }
2210
2211 /*
2212 * Check if the cache has reached its limits and eviction is required
2213 * prior to insert.
2214 */
2215 static int
2216 arc_evict_needed(arc_buf_contents_t type)
2217 {
2218 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2219 return (1);
2220
2221 #ifdef _KERNEL
2222 /*
2223 * If zio data pages are being allocated out of a separate heap segment,
2224 * then enforce that the size of available vmem for this area remains
2225 * above about 1/32nd free.
2226 */
2227 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2228 vmem_size(zio_arena, VMEM_FREE) <
2229 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2230 return (1);
2231 #endif
2232
2233 if (arc_reclaim_needed())
2234 return (1);
2235
2236 return (arc_size > arc_c);
2237 }
2238
2239 /*
2240 * The buffer, supplied as the first argument, needs a data block.
2241 * So, if we are at cache max, determine which cache should be victimized.
2242 * We have the following cases:
2243 *
2244 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2245 * In this situation if we're out of space, but the resident size of the MFU is
2246 * under the limit, victimize the MFU cache to satisfy this insertion request.
2247 *
2248 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2249 * Here, we've used up all of the available space for the MRU, so we need to
2250 * evict from our own cache instead. Evict from the set of resident MRU
2251 * entries.
2252 *
2253 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2254 * c minus p represents the MFU space in the cache, since p is the size of the
2255 * cache that is dedicated to the MRU. In this situation there's still space on
2256 * the MFU side, so the MRU side needs to be victimized.
2257 *
2258 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2259 * MFU's resident set is consuming more space than it has been allotted. In
2260 * this situation, we must victimize our own cache, the MFU, for this insertion.
2261 */
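/*
 * A worked example with illustrative numbers (assuming an eligible buffer
 * of the right type exists on the chosen list): with arc_c at 1000 units
 * and arc_p at 600, an MRU insert while anon + mru already hold 700 units
 * falls under case 2 above (p <= anon + mru), so the MRU list itself is
 * evicted; an MFU insert while the MFU holds 300 units falls under case 3
 * (c - p = 400 > 300), so the MRU side is victimized instead.
 */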
2262 static void
2263 arc_get_data_buf(arc_buf_t *buf)
2264 {
2265 arc_state_t *state = buf->b_hdr->b_state;
2266 uint64_t size = buf->b_hdr->b_size;
2267 arc_buf_contents_t type = buf->b_hdr->b_type;
2268
2269 arc_adapt(size, state);
2270
2271 /*
2272 * We have not yet reached cache maximum size,
2273 * just allocate a new buffer.
2274 */
2275 if (!arc_evict_needed(type)) {
2276 if (type == ARC_BUFC_METADATA) {
2277 buf->b_data = zio_buf_alloc(size);
2278 arc_space_consume(size, ARC_SPACE_DATA);
2279 } else {
2280 ASSERT(type == ARC_BUFC_DATA);
2281 buf->b_data = zio_data_buf_alloc(size);
2282 ARCSTAT_INCR(arcstat_data_size, size);
2283 atomic_add_64(&arc_size, size);
2284 }
2285 goto out;
2286 }
2287
2288 /*
2289 * If we are prefetching from the mfu ghost list, this buffer
2290 * will end up on the mru list; so steal space from there.
2291 */
2292 if (state == arc_mfu_ghost)
2293 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2294 else if (state == arc_mru_ghost)
2295 state = arc_mru;
2296
2297 if (state == arc_mru || state == arc_anon) {
2298 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2299 state = (arc_mfu->arcs_lsize[type] >= size &&
2300 arc_p > mru_used) ? arc_mfu : arc_mru;
2301 } else {
2302 /* MFU cases */
2303 uint64_t mfu_space = arc_c - arc_p;
2304 state = (arc_mru->arcs_lsize[type] >= size &&
2305 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2306 }
2307 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2308 if (type == ARC_BUFC_METADATA) {
2309 buf->b_data = zio_buf_alloc(size);
2310 arc_space_consume(size, ARC_SPACE_DATA);
2311 } else {
2312 ASSERT(type == ARC_BUFC_DATA);
2313 buf->b_data = zio_data_buf_alloc(size);
2314 ARCSTAT_INCR(arcstat_data_size, size);
2315 atomic_add_64(&arc_size, size);
2316 }
2317 ARCSTAT_BUMP(arcstat_recycle_miss);
2318 }
2319 ASSERT(buf->b_data != NULL);
2320 out:
2321 /*
2322 * Update the state size. Note that ghost states have a
2323 * "ghost size" and so don't need to be updated.
2324 */
2325 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2326 arc_buf_hdr_t *hdr = buf->b_hdr;
2327
2328 atomic_add_64(&hdr->b_state->arcs_size, size);
2329 if (list_link_active(&hdr->b_arc_node)) {
2330 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2331 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2332 }
2333 /*
2334 * If we are growing the cache, and we are adding anonymous
2335 * data, and we have outgrown arc_p, update arc_p
2336 */
2337 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2338 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2339 arc_p = MIN(arc_c, arc_p + size);
2340 }
2341 }
2342
2343 /*
2344 * This routine is called whenever a buffer is accessed.
2345 * NOTE: the hash lock is dropped in this function.
2346 */
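/*
 * An illustrative access trace for a single non-prefetch buffer: the first
 * arc_access() moves it from arc_anon to arc_mru; another access more than
 * ARC_MINTIME later promotes it to arc_mfu; and if it is evicted and later
 * read again, the resulting arc_mru_ghost or arc_mfu_ghost hit brings it
 * back in as arc_mfu.
 */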
2347 static void
2348 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2349 {
2350 clock_t now;
2351
2352 ASSERT(MUTEX_HELD(hash_lock));
2353
2354 if (buf->b_state == arc_anon) {
2355 /*
2356 * This buffer is not in the cache, and does not
2357 * appear in our "ghost" list. Add the new buffer
2358 * to the MRU state.
2359 */
2360
2361 ASSERT(buf->b_arc_access == 0);
2362 buf->b_arc_access = ddi_get_lbolt();
2363 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2364 arc_change_state(arc_mru, buf, hash_lock);
2365
2366 } else if (buf->b_state == arc_mru) {
2367 now = ddi_get_lbolt();
2368
2369 /*
2370 * If this buffer is here because of a prefetch, then either:
2371 * - clear the flag if this is a "referencing" read
2372 * (any subsequent access will bump this into the MFU state).
2373 * or
2374 * - move the buffer to the head of the list if this is
2375 * another prefetch (to make it less likely to be evicted).
2376 */
2377 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2378 if (refcount_count(&buf->b_refcnt) == 0) {
2379 ASSERT(list_link_active(&buf->b_arc_node));
2380 } else {
2381 buf->b_flags &= ~ARC_PREFETCH;
2382 ARCSTAT_BUMP(arcstat_mru_hits);
2383 }
2384 buf->b_arc_access = now;
2385 return;
2386 }
2387
2388 /*
2389 * This buffer has been "accessed" only once so far,
2390 * but it is still in the cache. Move it to the MFU
2391 * state.
2392 */
2393 if (now > buf->b_arc_access + ARC_MINTIME) {
2394 /*
2395 * More than 125ms have passed since we
2396 * instantiated this buffer. Move it to the
2397 * most frequently used state.
2398 */
2399 buf->b_arc_access = now;
2400 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2401 arc_change_state(arc_mfu, buf, hash_lock);
2402 }
2403 ARCSTAT_BUMP(arcstat_mru_hits);
2404 } else if (buf->b_state == arc_mru_ghost) {
2405 arc_state_t *new_state;
2406 /*
2407 * This buffer has been "accessed" recently, but
2408 * was evicted from the cache. Move it to the
2409 * MFU state.
2410 */
2411
2412 if (buf->b_flags & ARC_PREFETCH) {
2413 new_state = arc_mru;
2414 if (refcount_count(&buf->b_refcnt) > 0)
2415 buf->b_flags &= ~ARC_PREFETCH;
2416 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2417 } else {
2418 new_state = arc_mfu;
2419 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2420 }
2421
2422 buf->b_arc_access = ddi_get_lbolt();
2423 arc_change_state(new_state, buf, hash_lock);
2424
2425 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2426 } else if (buf->b_state == arc_mfu) {
2427 /*
2428 * This buffer has been accessed more than once and is
2429 * still in the cache. Keep it in the MFU state.
2430 *
2431 * NOTE: an add_reference() that occurred when we did
2432 * the arc_read() will have kicked this off the list.
2433 * If it was a prefetch, we will explicitly move it to
2434 * the head of the list now.
2435 */
2436 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2437 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2438 ASSERT(list_link_active(&buf->b_arc_node));
2439 }
2440 ARCSTAT_BUMP(arcstat_mfu_hits);
2441 buf->b_arc_access = ddi_get_lbolt();
2442 } else if (buf->b_state == arc_mfu_ghost) {
2443 arc_state_t *new_state = arc_mfu;
2444 /*
2445 * This buffer has been accessed more than once but has
2446 * been evicted from the cache. Move it back to the
2447 * MFU state.
2448 */
2449
2450 if (buf->b_flags & ARC_PREFETCH) {
2451 /*
2452 * This is a prefetch access...
2453 * move this block back to the MRU state.
2454 */
2455 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2456 new_state = arc_mru;
2457 }
2458
2459 buf->b_arc_access = ddi_get_lbolt();
2460 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2461 arc_change_state(new_state, buf, hash_lock);
2462
2463 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2464 } else if (buf->b_state == arc_l2c_only) {
2465 /*
2466 * This buffer is on the 2nd Level ARC.
2467 */
2468
2469 buf->b_arc_access = ddi_get_lbolt();
2470 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2471 arc_change_state(arc_mfu, buf, hash_lock);
2472 } else {
2473 ASSERT(!"invalid arc state");
2474 }
2475 }
2476
2477 /* a generic arc_done_func_t which you can use */
2478 /* ARGSUSED */
2479 void
2480 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2481 {
2482 if (zio == NULL || zio->io_error == 0)
2483 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2484 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2485 }
2486
2487 /* a generic arc_done_func_t */
2488 void
2489 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2490 {
2491 arc_buf_t **bufp = arg;
2492 if (zio && zio->io_error) {
2493 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2494 *bufp = NULL;
2495 } else {
2496 *bufp = buf;
2497 ASSERT(buf->b_data);
2498 }
2499 }
2500
2501 static void
2502 arc_read_done(zio_t *zio)
2503 {
2504 arc_buf_hdr_t *hdr, *found;
2505 arc_buf_t *buf;
2506 arc_buf_t *abuf; /* buffer we're assigning to callback */
2507 kmutex_t *hash_lock;
2508 arc_callback_t *callback_list, *acb;
2509 int freeable = FALSE;
2510
2511 buf = zio->io_private;
2512 hdr = buf->b_hdr;
2513
2514 /*
2515 * The hdr was inserted into the hash table and removed from lists
2516 * prior to starting I/O. We should find this header, since
2517 * it's in the hash table, and it should be legit since it's
2518 * not possible to evict it during the I/O. The only possible
2519 * reason for it not to be found is if we were freed during the
2520 * read.
2521 */
2522 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2523 &hash_lock);
2524
2525 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2526 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2527 (found == hdr && HDR_L2_READING(hdr)));
2528
2529 hdr->b_flags &= ~ARC_L2_EVICTED;
2530 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2531 hdr->b_flags &= ~ARC_L2CACHE;
2532
2533 /* byteswap if necessary */
2534 callback_list = hdr->b_acb;
2535 ASSERT(callback_list != NULL);
2536 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2537 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2538 byteswap_uint64_array :
2539 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2540 func(buf->b_data, hdr->b_size);
2541 }
2542
2543 arc_cksum_compute(buf, B_FALSE);
2544
2545 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2546 /*
2547 * Only call arc_access on anonymous buffers. This is because
2548 * if we've issued an I/O for an evicted buffer, we've already
2549 * called arc_access (to prevent any simultaneous readers from
2550 * getting confused).
2551 */
2552 arc_access(hdr, hash_lock);
2553 }
2554
2555 /* create copies of the data buffer for the callers */
2556 abuf = buf;
2557 for (acb = callback_list; acb; acb = acb->acb_next) {
2558 if (acb->acb_done) {
2559 if (abuf == NULL)
2560 abuf = arc_buf_clone(buf);
2561 acb->acb_buf = abuf;
2562 abuf = NULL;
2563 }
2564 }
2565 hdr->b_acb = NULL;
2566 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2567 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2568 if (abuf == buf) {
2569 ASSERT(buf->b_efunc == NULL);
2570 ASSERT(hdr->b_datacnt == 1);
2571 hdr->b_flags |= ARC_BUF_AVAILABLE;
2572 }
2573
2574 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2575
2576 if (zio->io_error != 0) {
2577 hdr->b_flags |= ARC_IO_ERROR;
2578 if (hdr->b_state != arc_anon)
2579 arc_change_state(arc_anon, hdr, hash_lock);
2580 if (HDR_IN_HASH_TABLE(hdr))
2581 buf_hash_remove(hdr);
2582 freeable = refcount_is_zero(&hdr->b_refcnt);
2583 }
2584
2585 /*
2586 * Broadcast before we drop the hash_lock to avoid the possibility
2587 * that the hdr (and hence the cv) might be freed before we get to
2588 * the cv_broadcast().
2589 */
2590 cv_broadcast(&hdr->b_cv);
2591
2592 if (hash_lock) {
2593 mutex_exit(hash_lock);
2594 } else {
2595 /*
2596 * This block was freed while we waited for the read to
2597 * complete. It has been removed from the hash table and
2598 * moved to the anonymous state (so that it won't show up
2599 * in the cache).
2600 */
2601 ASSERT3P(hdr->b_state, ==, arc_anon);
2602 freeable = refcount_is_zero(&hdr->b_refcnt);
2603 }
2604
2605 /* execute each callback and free its structure */
2606 while ((acb = callback_list) != NULL) {
2607 if (acb->acb_done)
2608 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2609
2610 if (acb->acb_zio_dummy != NULL) {
2611 acb->acb_zio_dummy->io_error = zio->io_error;
2612 zio_nowait(acb->acb_zio_dummy);
2613 }
2614
2615 callback_list = acb->acb_next;
2616 kmem_free(acb, sizeof (arc_callback_t));
2617 }
2618
2619 if (freeable)
2620 arc_hdr_destroy(hdr);
2621 }
2622
2623 /*
2624 * "Read" the block block at the specified DVA (in bp) via the
2625 * cache. If the block is found in the cache, invoke the provided
2626 * callback immediately and return. Note that the `zio' parameter
2627 * in the callback will be NULL in this case, since no IO was
2628 * required. If the block is not in the cache pass the read request
2629 * on to the spa with a substitute callback function, so that the
2630 * requested block will be added to the cache.
2631 *
2632 * If a read request arrives for a block that has a read in-progress,
2633 * either wait for the in-progress read to complete (and return the
2634 * results); or, if this is a read with a "done" func, add a record
2635 * to the read to invoke the "done" func when the read completes,
2636 * and return; or just return.
2637 *
2638 * arc_read_done() will invoke all the requested "done" functions
2639 * for readers of this block.
2640 *
2641 * Normal callers should use arc_read and pass the arc buffer and offset
2642 * for the bp. But if you know you don't need locking, you can use
2643 * arc_read_bp.
2644 */
2645 int
2646 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf,
2647 arc_done_func_t *done, void *private, int priority, int zio_flags,
2648 uint32_t *arc_flags, const zbookmark_t *zb)
2649 {
2650 int err;
2651
2652 if (pbuf == NULL) {
2653 /*
2654 * XXX This happens from traverse callback funcs, for
2655 * the objset_phys_t block.
2656 */
2657 return (arc_read_nolock(pio, spa, bp, done, private, priority,
2658 zio_flags, arc_flags, zb));
2659 }
2660
2661 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2662 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2663 rw_enter(&pbuf->b_data_lock, RW_READER);
2664
2665 err = arc_read_nolock(pio, spa, bp, done, private, priority,
2666 zio_flags, arc_flags, zb);
2667 rw_exit(&pbuf->b_data_lock);
2668
2669 return (err);
2670 }
2671
2672 int
2673 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
2674 arc_done_func_t *done, void *private, int priority, int zio_flags,
2675 uint32_t *arc_flags, const zbookmark_t *zb)
2676 {
2677 arc_buf_hdr_t *hdr;
2678 arc_buf_t *buf;
2679 kmutex_t *hash_lock;
2680 zio_t *rzio;
2681 uint64_t guid = spa_guid(spa);
2682
2683 top:
2684 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2685 &hash_lock);
2686 if (hdr && hdr->b_datacnt > 0) {
2687
2688 *arc_flags |= ARC_CACHED;
2689
2690 if (HDR_IO_IN_PROGRESS(hdr)) {
2691
2692 if (*arc_flags & ARC_WAIT) {
2693 cv_wait(&hdr->b_cv, hash_lock);
2694 mutex_exit(hash_lock);
2695 goto top;
2696 }
2697 ASSERT(*arc_flags & ARC_NOWAIT);
2698
2699 if (done) {
2700 arc_callback_t *acb = NULL;
2701
2702 acb = kmem_zalloc(sizeof (arc_callback_t),
2703 KM_SLEEP);
2704 acb->acb_done = done;
2705 acb->acb_private = private;
2706 if (pio != NULL)
2707 acb->acb_zio_dummy = zio_null(pio,
2708 spa, NULL, NULL, NULL, zio_flags);
2709
2710 ASSERT(acb->acb_done != NULL);
2711 acb->acb_next = hdr->b_acb;
2712 hdr->b_acb = acb;
2713 add_reference(hdr, hash_lock, private);
2714 mutex_exit(hash_lock);
2715 return (0);
2716 }
2717 mutex_exit(hash_lock);
2718 return (0);
2719 }
2720
2721 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2722
2723 if (done) {
2724 add_reference(hdr, hash_lock, private);
2725 /*
2726 * If this block is already in use, create a new
2727 * copy of the data so that we will be guaranteed
2728 * that arc_release() will always succeed.
2729 */
2730 buf = hdr->b_buf;
2731 ASSERT(buf);
2732 ASSERT(buf->b_data);
2733 if (HDR_BUF_AVAILABLE(hdr)) {
2734 ASSERT(buf->b_efunc == NULL);
2735 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2736 } else {
2737 buf = arc_buf_clone(buf);
2738 }
2739
2740 } else if (*arc_flags & ARC_PREFETCH &&
2741 refcount_count(&hdr->b_refcnt) == 0) {
2742 hdr->b_flags |= ARC_PREFETCH;
2743 }
2744 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2745 arc_access(hdr, hash_lock);
2746 if (*arc_flags & ARC_L2CACHE)
2747 hdr->b_flags |= ARC_L2CACHE;
2748 mutex_exit(hash_lock);
2749 ARCSTAT_BUMP(arcstat_hits);
2750 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2751 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2752 data, metadata, hits);
2753
2754 if (done)
2755 done(NULL, buf, private);
2756 } else {
2757 uint64_t size = BP_GET_LSIZE(bp);
2758 arc_callback_t *acb;
2759 vdev_t *vd = NULL;
2760 uint64_t addr;
2761 boolean_t devw = B_FALSE;
2762
2763 if (hdr == NULL) {
2764 /* this block is not in the cache */
2765 arc_buf_hdr_t *exists;
2766 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2767 buf = arc_buf_alloc(spa, size, private, type);
2768 hdr = buf->b_hdr;
2769 hdr->b_dva = *BP_IDENTITY(bp);
2770 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2771 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2772 exists = buf_hash_insert(hdr, &hash_lock);
2773 if (exists) {
2774 /* somebody beat us to the hash insert */
2775 mutex_exit(hash_lock);
2776 buf_discard_identity(hdr);
2777 (void) arc_buf_remove_ref(buf, private);
2778 goto top; /* restart the IO request */
2779 }
2780 /* if this is a prefetch, we don't have a reference */
2781 if (*arc_flags & ARC_PREFETCH) {
2782 (void) remove_reference(hdr, hash_lock,
2783 private);
2784 hdr->b_flags |= ARC_PREFETCH;
2785 }
2786 if (*arc_flags & ARC_L2CACHE)
2787 hdr->b_flags |= ARC_L2CACHE;
2788 if (BP_GET_LEVEL(bp) > 0)
2789 hdr->b_flags |= ARC_INDIRECT;
2790 } else {
2791 /* this block is in the ghost cache */
2792 ASSERT(GHOST_STATE(hdr->b_state));
2793 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2794 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2795 ASSERT(hdr->b_buf == NULL);
2796
2797 /* if this is a prefetch, we don't have a reference */
2798 if (*arc_flags & ARC_PREFETCH)
2799 hdr->b_flags |= ARC_PREFETCH;
2800 else
2801 add_reference(hdr, hash_lock, private);
2802 if (*arc_flags & ARC_L2CACHE)
2803 hdr->b_flags |= ARC_L2CACHE;
2804 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2805 buf->b_hdr = hdr;
2806 buf->b_data = NULL;
2807 buf->b_efunc = NULL;
2808 buf->b_private = NULL;
2809 buf->b_next = NULL;
2810 hdr->b_buf = buf;
2811 ASSERT(hdr->b_datacnt == 0);
2812 hdr->b_datacnt = 1;
2813 arc_get_data_buf(buf);
2814 arc_access(hdr, hash_lock);
2815 }
2816
2817 ASSERT(!GHOST_STATE(hdr->b_state));
2818
2819 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2820 acb->acb_done = done;
2821 acb->acb_private = private;
2822
2823 ASSERT(hdr->b_acb == NULL);
2824 hdr->b_acb = acb;
2825 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2826
2827 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2828 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2829 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2830 addr = hdr->b_l2hdr->b_daddr;
2831 /*
2832 * Lock out device removal.
2833 */
2834 if (vdev_is_dead(vd) ||
2835 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2836 vd = NULL;
2837 }
2838
2839 mutex_exit(hash_lock);
2840
2841 ASSERT3U(hdr->b_size, ==, size);
2842 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
2843 uint64_t, size, zbookmark_t *, zb);
2844 ARCSTAT_BUMP(arcstat_misses);
2845 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2846 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2847 data, metadata, misses);
2848
2849 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2850 /*
2851 * Read from the L2ARC if the following are true:
2852 * 1. The L2ARC vdev was previously cached.
2853 * 2. This buffer still has L2ARC metadata.
2854 * 3. This buffer isn't currently writing to the L2ARC.
2855 * 4. The L2ARC entry wasn't evicted, which may
2856 * also have invalidated the vdev.
2857 * 5. This isn't a prefetch while l2arc_noprefetch is enabled.
2858 */
2859 if (hdr->b_l2hdr != NULL &&
2860 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2861 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2862 l2arc_read_callback_t *cb;
2863
2864 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2865 ARCSTAT_BUMP(arcstat_l2_hits);
2866
2867 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2868 KM_SLEEP);
2869 cb->l2rcb_buf = buf;
2870 cb->l2rcb_spa = spa;
2871 cb->l2rcb_bp = *bp;
2872 cb->l2rcb_zb = *zb;
2873 cb->l2rcb_flags = zio_flags;
2874
2875 /*
2876 * l2arc read. The SCL_L2ARC lock will be
2877 * released by l2arc_read_done().
2878 */
2879 rzio = zio_read_phys(pio, vd, addr, size,
2880 buf->b_data, ZIO_CHECKSUM_OFF,
2881 l2arc_read_done, cb, priority, zio_flags |
2882 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2883 ZIO_FLAG_DONT_PROPAGATE |
2884 ZIO_FLAG_DONT_RETRY, B_FALSE);
2885 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2886 zio_t *, rzio);
2887 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
2888
2889 if (*arc_flags & ARC_NOWAIT) {
2890 zio_nowait(rzio);
2891 return (0);
2892 }
2893
2894 ASSERT(*arc_flags & ARC_WAIT);
2895 if (zio_wait(rzio) == 0)
2896 return (0);
2897
2898 /* l2arc read error; goto zio_read() */
2899 } else {
2900 DTRACE_PROBE1(l2arc__miss,
2901 arc_buf_hdr_t *, hdr);
2902 ARCSTAT_BUMP(arcstat_l2_misses);
2903 if (HDR_L2_WRITING(hdr))
2904 ARCSTAT_BUMP(arcstat_l2_rw_clash);
2905 spa_config_exit(spa, SCL_L2ARC, vd);
2906 }
2907 } else {
2908 if (vd != NULL)
2909 spa_config_exit(spa, SCL_L2ARC, vd);
2910 if (l2arc_ndev != 0) {
2911 DTRACE_PROBE1(l2arc__miss,
2912 arc_buf_hdr_t *, hdr);
2913 ARCSTAT_BUMP(arcstat_l2_misses);
2914 }
2915 }
2916
2917 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2918 arc_read_done, buf, priority, zio_flags, zb);
2919
2920 if (*arc_flags & ARC_WAIT)
2921 return (zio_wait(rzio));
2922
2923 ASSERT(*arc_flags & ARC_NOWAIT);
2924 zio_nowait(rzio);
2925 }
2926 return (0);
2927 }
2928
2929 void
2930 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2931 {
2932 ASSERT(buf->b_hdr != NULL);
2933 ASSERT(buf->b_hdr->b_state != arc_anon);
2934 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2935 ASSERT(buf->b_efunc == NULL);
2936 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
2937
2938 buf->b_efunc = func;
2939 buf->b_private = private;
2940 }
2941
2942 /*
2943 * This is used by the DMU to let the ARC know that a buffer is
2944 * being evicted, so the ARC should clean up. If this arc buf
2945 * is not yet in the evicted state, it will be put there.
2946 */
2947 int
2948 arc_buf_evict(arc_buf_t *buf)
2949 {
2950 arc_buf_hdr_t *hdr;
2951 kmutex_t *hash_lock;
2952 arc_buf_t **bufp;
2953
2954 mutex_enter(&buf->b_evict_lock);
2955 hdr = buf->b_hdr;
2956 if (hdr == NULL) {
2957 /*
2958 * We are in arc_do_user_evicts().
2959 */
2960 ASSERT(buf->b_data == NULL);
2961 mutex_exit(&buf->b_evict_lock);
2962 return (0);
2963 } else if (buf->b_data == NULL) {
2964 arc_buf_t copy = *buf; /* structure assignment */
2965 /*
2966 * We are on the eviction list; process this buffer now
2967 * but let arc_do_user_evicts() do the reaping.
2968 */
2969 buf->b_efunc = NULL;
2970 mutex_exit(&buf->b_evict_lock);
2971 VERIFY(copy.b_efunc(&copy) == 0);
2972 return (1);
2973 }
2974 hash_lock = HDR_LOCK(hdr);
2975 mutex_enter(hash_lock);
2976 hdr = buf->b_hdr;
2977 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2978
2979 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2980 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2981
2982 /*
2983 * Pull this buffer off of the hdr
2984 */
2985 bufp = &hdr->b_buf;
2986 while (*bufp != buf)
2987 bufp = &(*bufp)->b_next;
2988 *bufp = buf->b_next;
2989
2990 ASSERT(buf->b_data != NULL);
2991 arc_buf_destroy(buf, FALSE, FALSE);
2992
2993 if (hdr->b_datacnt == 0) {
2994 arc_state_t *old_state = hdr->b_state;
2995 arc_state_t *evicted_state;
2996
2997 ASSERT(hdr->b_buf == NULL);
2998 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2999
3000 evicted_state =
3001 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3002
3003 mutex_enter(&old_state->arcs_mtx);
3004 mutex_enter(&evicted_state->arcs_mtx);
3005
3006 arc_change_state(evicted_state, hdr, hash_lock);
3007 ASSERT(HDR_IN_HASH_TABLE(hdr));
3008 hdr->b_flags |= ARC_IN_HASH_TABLE;
3009 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3010
3011 mutex_exit(&evicted_state->arcs_mtx);
3012 mutex_exit(&old_state->arcs_mtx);
3013 }
3014 mutex_exit(hash_lock);
3015 mutex_exit(&buf->b_evict_lock);
3016
3017 VERIFY(buf->b_efunc(buf) == 0);
3018 buf->b_efunc = NULL;
3019 buf->b_private = NULL;
3020 buf->b_hdr = NULL;
3021 buf->b_next = NULL;
3022 kmem_cache_free(buf_cache, buf);
3023 return (1);
3024 }
3025
3026 /*
3027 * Release this buffer from the cache. This must be done
3028 * after a read and prior to modifying the buffer contents.
3029 * If the buffer has more than one reference, we must make
3030 * a new hdr for the buffer.
3031 */
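/*
 * Illustrative sketch of the expected calling pattern (hypothetical caller
 * holding a referenced buffer "buf" under "tag"):
 *
 *	if (!arc_released(buf))
 *		arc_release(buf, tag);
 *	... buf->b_data may now be modified safely ...
 */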
3032 void
3033 arc_release(arc_buf_t *buf, void *tag)
3034 {
3035 arc_buf_hdr_t *hdr;
3036 kmutex_t *hash_lock = NULL;
3037 l2arc_buf_hdr_t *l2hdr;
3038 uint64_t buf_size;
3039
3040 /*
3041 * It would be nice to assert that if it's DMU metadata (level >
3042 * 0 || it's the dnode file), then it must be syncing context.
3043 * But we don't know that information at this level.
3044 */
3045
3046 mutex_enter(&buf->b_evict_lock);
3047 hdr = buf->b_hdr;
3048
3049 /* this buffer is not on any list */
3050 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3051
3052 if (hdr->b_state == arc_anon) {
3053 /* this buffer is already released */
3054 ASSERT(buf->b_efunc == NULL);
3055 } else {
3056 hash_lock = HDR_LOCK(hdr);
3057 mutex_enter(hash_lock);
3058 hdr = buf->b_hdr;
3059 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3060 }
3061
3062 l2hdr = hdr->b_l2hdr;
3063 if (l2hdr) {
3064 mutex_enter(&l2arc_buflist_mtx);
3065 hdr->b_l2hdr = NULL;
3066 buf_size = hdr->b_size;
3067 }
3068
3069 /*
3070 * Do we have more than one buf?
3071 */
3072 if (hdr->b_datacnt > 1) {
3073 arc_buf_hdr_t *nhdr;
3074 arc_buf_t **bufp;
3075 uint64_t blksz = hdr->b_size;
3076 uint64_t spa = hdr->b_spa;
3077 arc_buf_contents_t type = hdr->b_type;
3078 uint32_t flags = hdr->b_flags;
3079
3080 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3081 /*
3082 * Pull the data off of this hdr and attach it to
3083 * a new anonymous hdr.
3084 */
3085 (void) remove_reference(hdr, hash_lock, tag);
3086 bufp = &hdr->b_buf;
3087 while (*bufp != buf)
3088 bufp = &(*bufp)->b_next;
3089 *bufp = buf->b_next;
3090 buf->b_next = NULL;
3091
3092 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3093 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3094 if (refcount_is_zero(&hdr->b_refcnt)) {
3095 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3096 ASSERT3U(*size, >=, hdr->b_size);
3097 atomic_add_64(size, -hdr->b_size);
3098 }
3099 hdr->b_datacnt -= 1;
3100 arc_cksum_verify(buf);
3101
3102 mutex_exit(hash_lock);
3103
3104 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3105 nhdr->b_size = blksz;
3106 nhdr->b_spa = spa;
3107 nhdr->b_type = type;
3108 nhdr->b_buf = buf;
3109 nhdr->b_state = arc_anon;
3110 nhdr->b_arc_access = 0;
3111 nhdr->b_flags = flags & ARC_L2_WRITING;
3112 nhdr->b_l2hdr = NULL;
3113 nhdr->b_datacnt = 1;
3114 nhdr->b_freeze_cksum = NULL;
3115 (void) refcount_add(&nhdr->b_refcnt, tag);
3116 buf->b_hdr = nhdr;
3117 mutex_exit(&buf->b_evict_lock);
3118 atomic_add_64(&arc_anon->arcs_size, blksz);
3119 } else {
3120 mutex_exit(&buf->b_evict_lock);
3121 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3122 ASSERT(!list_link_active(&hdr->b_arc_node));
3123 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3124 if (hdr->b_state != arc_anon)
3125 arc_change_state(arc_anon, hdr, hash_lock);
3126 hdr->b_arc_access = 0;
3127 if (hash_lock)
3128 mutex_exit(hash_lock);
3129
3130 buf_discard_identity(hdr);
3131 arc_buf_thaw(buf);
3132 }
3133 buf->b_efunc = NULL;
3134 buf->b_private = NULL;
3135
3136 if (l2hdr) {
3137 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3138 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3139 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3140 mutex_exit(&l2arc_buflist_mtx);
3141 }
3142 }
3143
3144 /*
3145 * Release this buffer. If it does not match the provided BP, fill it
3146 * with that block's contents.
3147 */
3148 /* ARGSUSED */
3149 int
3150 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
3151 zbookmark_t *zb)
3152 {
3153 arc_release(buf, tag);
3154 return (0);
3155 }
3156
3157 int
3158 arc_released(arc_buf_t *buf)
3159 {
3160 int released;
3161
3162 mutex_enter(&buf->b_evict_lock);
3163 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3164 mutex_exit(&buf->b_evict_lock);
3165 return (released);
3166 }
3167
3168 int
3169 arc_has_callback(arc_buf_t *buf)
3170 {
3171 int callback;
3172
3173 mutex_enter(&buf->b_evict_lock);
3174 callback = (buf->b_efunc != NULL);
3175 mutex_exit(&buf->b_evict_lock);
3176 return (callback);
3177 }
3178
3179 #ifdef ZFS_DEBUG
3180 int
3181 arc_referenced(arc_buf_t *buf)
3182 {
3183 int referenced;
3184
3185 mutex_enter(&buf->b_evict_lock);
3186 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3187 mutex_exit(&buf->b_evict_lock);
3188 return (referenced);
3189 }
3190 #endif
3191
3192 static void
3193 arc_write_ready(zio_t *zio)
3194 {
3195 arc_write_callback_t *callback = zio->io_private;
3196 arc_buf_t *buf = callback->awcb_buf;
3197 arc_buf_hdr_t *hdr = buf->b_hdr;
3198
3199 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3200 callback->awcb_ready(zio, buf, callback->awcb_private);
3201
3202 /*
3203 * If the IO is already in progress, then this is a re-write
3204 * attempt, so we need to thaw and re-compute the cksum.
3205 * It is the responsibility of the callback to handle the
3206 * accounting for any re-write attempt.
3207 */
3208 if (HDR_IO_IN_PROGRESS(hdr)) {
3209 mutex_enter(&hdr->b_freeze_lock);
3210 if (hdr->b_freeze_cksum != NULL) {
3211 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3212 hdr->b_freeze_cksum = NULL;
3213 }
3214 mutex_exit(&hdr->b_freeze_lock);
3215 }
3216 arc_cksum_compute(buf, B_FALSE);
3217 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3218 }
3219
3220 static void
3221 arc_write_done(zio_t *zio)
3222 {
3223 arc_write_callback_t *callback = zio->io_private;
3224 arc_buf_t *buf = callback->awcb_buf;
3225 arc_buf_hdr_t *hdr = buf->b_hdr;
3226
3227 ASSERT(hdr->b_acb == NULL);
3228
3229 if (zio->io_error == 0) {
3230 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3231 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3232 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3233 } else {
3234 ASSERT(BUF_EMPTY(hdr));
3235 }
3236
3237 /*
3238 * If the block to be written was all-zero, we may have
3239 * compressed it away. In this case no write was performed
3240 * so there will be no dva/birth/checksum. The buffer must
3241 * therefore remain anonymous (and uncached).
3242 */
3243 if (!BUF_EMPTY(hdr)) {
3244 arc_buf_hdr_t *exists;
3245 kmutex_t *hash_lock;
3246
3247 ASSERT(zio->io_error == 0);
3248
3249 arc_cksum_verify(buf);
3250
3251 exists = buf_hash_insert(hdr, &hash_lock);
3252 if (exists) {
3253 /*
3254 * This can only happen if we overwrite for
3255 * sync-to-convergence, because we remove
3256 * buffers from the hash table when we arc_free().
3257 */
3258 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3259 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3260 panic("bad overwrite, hdr=%p exists=%p",
3261 (void *)hdr, (void *)exists);
3262 ASSERT(refcount_is_zero(&exists->b_refcnt));
3263 arc_change_state(arc_anon, exists, hash_lock);
3264 mutex_exit(hash_lock);
3265 arc_hdr_destroy(exists);
3266 exists = buf_hash_insert(hdr, &hash_lock);
3267 ASSERT3P(exists, ==, NULL);
3268 } else {
3269 /* Dedup */
3270 ASSERT(hdr->b_datacnt == 1);
3271 ASSERT(hdr->b_state == arc_anon);
3272 ASSERT(BP_GET_DEDUP(zio->io_bp));
3273 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3274 }
3275 }
3276 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3277 /* if it's not anon, we are doing a scrub */
3278 if (!exists && hdr->b_state == arc_anon)
3279 arc_access(hdr, hash_lock);
3280 mutex_exit(hash_lock);
3281 } else {
3282 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3283 }
3284
3285 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3286 callback->awcb_done(zio, buf, callback->awcb_private);
3287
3288 kmem_free(callback, sizeof (arc_write_callback_t));
3289 }
3290
3291 zio_t *
3292 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3293 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3294 arc_done_func_t *ready, arc_done_func_t *done, void *private,
3295 int priority, int zio_flags, const zbookmark_t *zb)
3296 {
3297 arc_buf_hdr_t *hdr = buf->b_hdr;
3298 arc_write_callback_t *callback;
3299 zio_t *zio;
3300
3301 ASSERT(ready != NULL);
3302 ASSERT(done != NULL);
3303 ASSERT(!HDR_IO_ERROR(hdr));
3304 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3305 ASSERT(hdr->b_acb == NULL);
3306 if (l2arc)
3307 hdr->b_flags |= ARC_L2CACHE;
3308 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3309 callback->awcb_ready = ready;
3310 callback->awcb_done = done;
3311 callback->awcb_private = private;
3312 callback->awcb_buf = buf;
3313
3314 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3315 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3316
3317 return (zio);
3318 }
3319
3320 static int
3321 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3322 {
3323 #ifdef _KERNEL
3324 uint64_t available_memory = ptob(freemem);
3325 static uint64_t page_load = 0;
3326 static uint64_t last_txg = 0;
3327
3328 #if defined(__i386)
3329 available_memory =
3330 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3331 #endif
3332 if (available_memory >= zfs_write_limit_max)
3333 return (0);
3334
3335 if (txg > last_txg) {
3336 last_txg = txg;
3337 page_load = 0;
3338 }
3339 /*
3340 * If we are in pageout, we know that memory is already tight and
3341 * the ARC is already going to be evicting, so we just want to
3342 * continue to let page writes occur as quickly as possible.
3343 */
3344 if (curproc == proc_pageout) {
3345 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3346 return (ERESTART);
3347 /* Note: reserve is inflated, so we deflate */
3348 page_load += reserve / 8;
3349 return (0);
3350 } else if (page_load > 0 && arc_reclaim_needed()) {
3351 /* memory is low, delay before restarting */
3352 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3353 return (EAGAIN);
3354 }
3355 page_load = 0;
3356
3357 if (arc_size > arc_c_min) {
3358 uint64_t evictable_memory =
3359 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3360 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3361 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3362 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3363 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3364 }
3365
3366 if (inflight_data > available_memory / 4) {
3367 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3368 return (ERESTART);
3369 }
3370 #endif
3371 return (0);
3372 }
3373
3374 void
3375 arc_tempreserve_clear(uint64_t reserve)
3376 {
3377 atomic_add_64(&arc_tempreserve, -reserve);
3378 ASSERT((int64_t)arc_tempreserve >= 0);
3379 }
3380
3381 int
3382 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3383 {
3384 int error;
3385 uint64_t anon_size;
3386
3387 #ifdef ZFS_DEBUG
3388 /*
3389 * Once in a while, fail for no reason. Everything should cope.
3390 */
3391 if (spa_get_random(10000) == 0) {
3392 dprintf("forcing random failure\n");
3393 return (ERESTART);
3394 }
3395 #endif
3396 if (reserve > arc_c/4 && !arc_no_grow)
3397 arc_c = MIN(arc_c_max, reserve * 4);
3398 if (reserve > arc_c)
3399 return (ENOMEM);
3400
3401 /*
3402 * Don't count loaned bufs as in flight dirty data to prevent long
3403 * network delays from blocking transactions that are ready to be
3404 * assigned to a txg.
3405 */
3406 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3407
3408 /*
3409 * Writes will, almost always, require additional memory allocations
3410 * in order to compress/encrypt/etc the data. We therefore need to
3411 * make sure that there is sufficient available memory for this.
3412 */
3413 if (error = arc_memory_throttle(reserve, anon_size, txg))
3414 return (error);
3415
3416 /*
3417 * Throttle writes when the amount of dirty data in the cache
3418 * gets too large. We try to keep the cache less than half full
3419 * of dirty blocks so that our sync times don't grow too large.
3420 * Note: if two requests come in concurrently, we might let them
3421 * both succeed, when one of them should fail. Not a huge deal.
3422 */
3423
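/*
 * Worked example with illustrative numbers: with arc_c at 4GB, a
 * reservation fails with ERESTART once reserve + arc_tempreserve +
 * anon_size exceeds 2GB (arc_c / 2) while anon_size alone exceeds
 * 1GB (arc_c / 4).
 */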
3424 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3425 anon_size > arc_c / 4) {
3426 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3427 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3428 arc_tempreserve>>10,
3429 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3430 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3431 reserve>>10, arc_c>>10);
3432 return (ERESTART);
3433 }
3434 atomic_add_64(&arc_tempreserve, reserve);
3435 return (0);
3436 }
3437
3438 void
3439 arc_init(void)
3440 {
3441 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3442 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3443
3444 /* Convert seconds to clock ticks */
3445 arc_min_prefetch_lifespan = 1 * hz;
3446
3447 /* Start out with 1/8 of all memory */
3448 arc_c = physmem * PAGESIZE / 8;
3449
3450 #ifdef _KERNEL
3451 /*
3452 * On architectures where the physical memory can be larger
3453 * than the addressable space (intel in 32-bit mode), we may
3454 * need to limit the cache to 1/8 of VM size.
3455 */
3456 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3457 #endif
3458
3459 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3460 arc_c_min = MAX(arc_c / 4, 64<<20);
3461 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3462 if (arc_c * 8 >= 1<<30)
3463 arc_c_max = (arc_c * 8) - (1<<30);
3464 else
3465 arc_c_max = arc_c_min;
3466 arc_c_max = MAX(arc_c * 6, arc_c_max);
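/*
 * Worked example with illustrative numbers: on an 8GB machine (ignoring
 * the 32-bit VM clamp above), arc_c starts at 1GB, arc_c_min becomes
 * 256MB (MAX(1GB / 4, 64MB)) and arc_c_max becomes 7GB
 * (MAX(8GB - 1GB, 6 * 1GB)), before the zfs_arc_* tunables below are
 * applied.
 */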
3467
3468 /*
3469 * Allow the tunables to override our calculations if they are
3470 * reasonable (ie. over 64MB)
3471 */
3472 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3473 arc_c_max = zfs_arc_max;
3474 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3475 arc_c_min = zfs_arc_min;
3476
3477 arc_c = arc_c_max;
3478 arc_p = (arc_c >> 1);
3479
3480 /* limit meta-data to 1/4 of the arc capacity */
3481 arc_meta_limit = arc_c_max / 4;
3482
3483 /* Allow the tunable to override if it is reasonable */
3484 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3485 arc_meta_limit = zfs_arc_meta_limit;
3486
3487 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3488 arc_c_min = arc_meta_limit / 2;
3489
3490 if (zfs_arc_grow_retry > 0)
3491 arc_grow_retry = zfs_arc_grow_retry;
3492
3493 if (zfs_arc_shrink_shift > 0)
3494 arc_shrink_shift = zfs_arc_shrink_shift;
3495
3496 if (zfs_arc_p_min_shift > 0)
3497 arc_p_min_shift = zfs_arc_p_min_shift;
3498
3499 /* if kmem_flags are set, let's try to use less memory */
3500 if (kmem_debugging())
3501 arc_c = arc_c / 2;
3502 if (arc_c < arc_c_min)
3503 arc_c = arc_c_min;
3504
3505 arc_anon = &ARC_anon;
3506 arc_mru = &ARC_mru;
3507 arc_mru_ghost = &ARC_mru_ghost;
3508 arc_mfu = &ARC_mfu;
3509 arc_mfu_ghost = &ARC_mfu_ghost;
3510 arc_l2c_only = &ARC_l2c_only;
3511 arc_size = 0;
3512
3513 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3514 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3515 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3516 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3517 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3518 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3519
3520 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3521 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3522 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3523 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3524 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3525 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3526 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3527 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3528 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3529 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3530 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3531 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3532 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3533 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3534 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3535 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3536 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3537 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3538 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3539 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3540
3541 buf_init();
3542
3543 arc_thread_exit = 0;
3544 arc_eviction_list = NULL;
3545 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3546 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3547
3548 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3549 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3550
3551 if (arc_ksp != NULL) {
3552 arc_ksp->ks_data = &arc_stats;
3553 kstat_install(arc_ksp);
3554 }
3555
3556 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3557 TS_RUN, minclsyspri);
3558
3559 arc_dead = FALSE;
3560 arc_warm = B_FALSE;
3561
3562 if (zfs_write_limit_max == 0)
3563 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3564 else
3565 zfs_write_limit_shift = 0;
3566 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3567 }
3568
3569 void
3570 arc_fini(void)
3571 {
3572 mutex_enter(&arc_reclaim_thr_lock);
3573 arc_thread_exit = 1;
3574 while (arc_thread_exit != 0)
3575 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3576 mutex_exit(&arc_reclaim_thr_lock);
3577
3578 arc_flush(NULL);
3579
3580 arc_dead = TRUE;
3581
3582 if (arc_ksp != NULL) {
3583 kstat_delete(arc_ksp);
3584 arc_ksp = NULL;
3585 }
3586
3587 mutex_destroy(&arc_eviction_mtx);
3588 mutex_destroy(&arc_reclaim_thr_lock);
3589 cv_destroy(&arc_reclaim_thr_cv);
3590
3591 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3592 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3593 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3594 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3595 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3596 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3597 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3598 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3599
3600 mutex_destroy(&arc_anon->arcs_mtx);
3601 mutex_destroy(&arc_mru->arcs_mtx);
3602 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3603 mutex_destroy(&arc_mfu->arcs_mtx);
3604 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3605 mutex_destroy(&arc_l2c_only->arcs_mtx);
3606
3607 mutex_destroy(&zfs_write_limit_lock);
3608
3609 buf_fini();
3610
3611 ASSERT(arc_loaned_bytes == 0);
3612 }
3613
3614 /*
3615 * Level 2 ARC
3616 *
3617 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3618 * It uses dedicated storage devices to hold cached data, which are populated
3619 * using large infrequent writes. The main role of this cache is to boost
3620 * the performance of random read workloads. The intended L2ARC devices
3621 * include short-stroked disks, solid state disks, and other media with
3622 * substantially faster read latency than disk.
3623 *
3624 * +-----------------------+
3625 * | ARC |
3626 * +-----------------------+
3627 * | ^ ^
3628 * | | |
3629 * l2arc_feed_thread() arc_read()
3630 * | | |
3631 * | l2arc read |
3632 * V | |
3633 * +---------------+ |
3634 * | L2ARC | |
3635 * +---------------+ |
3636 * | ^ |
3637 * l2arc_write() | |
3638 * | | |
3639 * V | |
3640 * +-------+ +-------+
3641 * | vdev | | vdev |
3642 * | cache | | cache |
3643 * +-------+ +-------+
3644 * +=========+ .-----.
3645 * : L2ARC : |-_____-|
3646 * : devices : | Disks |
3647 * +=========+ `-_____-'
3648 *
3649 * Read requests are satisfied from the following sources, in order:
3650 *
3651 * 1) ARC
3652 * 2) vdev cache of L2ARC devices
3653 * 3) L2ARC devices
3654 * 4) vdev cache of disks
3655 * 5) disks
3656 *
3657 * Some L2ARC device types exhibit extremely slow write performance.
3658 * To accommodate this, there are some significant differences between
3659 * the L2ARC and traditional cache design:
3660 *
3661 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3662 * the ARC behave as usual, freeing buffers and placing headers on ghost
3663 * lists. The ARC does not send buffers to the L2ARC during eviction as
3664 * this would add inflated write latencies for all ARC memory pressure.
3665 *
3666 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3667 * It does this by periodically scanning buffers from the eviction-end of
3668 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3669 * not already there. It scans until a headroom of buffers is satisfied,
3670 * which itself is a buffer for ARC eviction. The thread that does this is
3671 * l2arc_feed_thread(), illustrated below; example sizes are included to
3672 * provide a better sense of ratio than this diagram:
3673 *
3674 * head --> tail
3675 * +---------------------+----------+
3676 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3677 * +---------------------+----------+ | o L2ARC eligible
3678 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3679 * +---------------------+----------+ |
3680 * 15.9 Gbytes ^ 32 Mbytes |
3681 * headroom |
3682 * l2arc_feed_thread()
3683 * |
3684 * l2arc write hand <--[oooo]--'
3685 * | 8 Mbyte
3686 * | write max
3687 * V
3688 * +==============================+
3689 * L2ARC dev |####|#|###|###| |####| ... |
3690 * +==============================+
3691 * 32 Gbytes
3692 *
3693 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3694 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3695 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3696 * safe to say that this is an uncommon case, since buffers at the end of
3697 * the ARC lists have moved there due to inactivity.
3698 *
3699 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3700 * then the L2ARC simply misses copying some buffers. This serves as a
3701 * pressure valve to prevent heavy read workloads from both stalling the ARC
3702 * with waits and clogging the L2ARC with writes. This also helps prevent
3703 * the potential for the L2ARC to churn if it attempts to cache content too
3704 * quickly, such as during backups of the entire pool.
3705 *
3706 * 5. After system boot and before the ARC has filled main memory, there are
3707 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3708 * lists can remain mostly static. Instead of searching from the tail of these
3709 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3710 * for eligible buffers, greatly increasing its chance of finding them.
3711 *
3712 * The L2ARC device write speed is also boosted during this time so that
3713 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3714 * there are no L2ARC reads, and no fear of degrading read performance
3715 * through increased writes.
3716 *
3717 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3718 * the vdev queue can aggregate them into larger and fewer writes. Each
3719 * device is written to in a rotor fashion, sweeping writes through
3720 * available space then repeating.
3721 *
3722 * 7. The L2ARC does not store dirty content. It never needs to flush
3723 * write buffers back to disk based storage.
3724 *
3725 * 8. If an ARC buffer is written (and dirtied) which also exists in the
3726 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3727 *
3728 * The performance of the L2ARC can be tweaked by a number of tunables, which
3729 * may be necessary for different workloads:
3730 *
3731 * l2arc_write_max max write bytes per interval
3732 * l2arc_write_boost extra write bytes during device warmup
3733 * l2arc_noprefetch skip caching prefetched buffers
3734 * l2arc_headroom number of max device writes to precache
3735 * l2arc_feed_secs seconds between L2ARC writing
3736 *
3737 * Tunables may be removed or added as future performance improvements are
3738 * integrated, and also may become zpool properties.
3739 *
3740 * There are three key functions that control how the L2ARC warms up:
3741 *
3742 * l2arc_write_eligible() check if a buffer is eligible to cache
3743 * l2arc_write_size() calculate how much to write
3744 * l2arc_write_interval() calculate sleep delay between writes
3745 *
3746 * These three functions determine what to write, how much, and how quickly
3747 * to send writes.
3748 */
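
/*
 * Illustrative feed cycle (the figures here are examples only; the actual
 * values are governed by the tunables above): with an 8 Mbyte
 * l2arc_write_max and an equal l2arc_write_boost, l2arc_write_size()
 * returns 16 Mbytes per pass while the ARC is still cold.  With an
 * l2arc_headroom of 2, l2arc_write_buffers() then scans up to
 * 2 * 16 = 32 Mbytes of buffers per list looking for eligible data, and
 * l2arc_write_interval() schedules the next pass l2arc_feed_secs later,
 * or sooner (after l2arc_feed_min_ms, when l2arc_feed_again is set and
 * more than half of the wanted bytes were actually written).
 */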
3749
3750 static boolean_t
3751 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3752 {
3753 /*
3754 * A buffer is *not* eligible for the L2ARC if it:
3755 * 1. belongs to a different spa.
3756 * 2. is already cached on the L2ARC.
3757 * 3. has an I/O in progress (it may be an incomplete read).
3758 * 4. is flagged not eligible (zfs property).
3759 */
3760 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3761 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3762 return (B_FALSE);
3763
3764 return (B_TRUE);
3765 }
3766
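/*
 * Determine how many bytes to write to the L2ARC device on this pass:
 * the per-interval maximum, plus the warmup boost until the ARC has
 * started evicting (arc_warm).
 */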
3767 static uint64_t
3768 l2arc_write_size(l2arc_dev_t *dev)
3769 {
3770 uint64_t size;
3771
3772 size = dev->l2ad_write;
3773
3774 if (arc_warm == B_FALSE)
3775 size += dev->l2ad_boost;
3776
3777 return (size);
3778
3779 }
3780
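/*
 * Compute when (in lbolt ticks) the feed thread should wake up for its
 * next pass.  The next pass is scheduled one interval after the time this
 * pass began, but never earlier than the present.
 */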
3781 static clock_t
3782 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3783 {
3784 clock_t interval, next, now;
3785
3786 /*
3787 * If the ARC lists are busy, increase our write rate; if the
3788 * lists are stale, idle back. This is achieved by checking
3789 * how much we previously wrote - if it was more than half of
3790 * what we wanted, schedule the next write much sooner.
3791 */
3792 if (l2arc_feed_again && wrote > (wanted / 2))
3793 interval = (hz * l2arc_feed_min_ms) / 1000;
3794 else
3795 interval = hz * l2arc_feed_secs;
3796
3797 now = ddi_get_lbolt();
3798 next = MAX(now, MIN(now + interval, began + interval));
3799
3800 return (next);
3801 }
3802
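/*
 * Adjust the header-size kstats when a header is charged to (or released
 * from) the L2ARC: the header bytes move from arcstat_hdr_size to
 * arcstat_l2_hdr_size, which also accounts for the attached l2arc_buf_hdr.
 */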
3803 static void
3804 l2arc_hdr_stat_add(void)
3805 {
3806 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3807 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3808 }
3809
3810 static void
3811 l2arc_hdr_stat_remove(void)
3812 {
3813 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3814 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3815 }
3816
3817 /*
3818 * Cycle through L2ARC devices. This is how L2ARC load balances.
3819 * If a device is returned, this also returns holding the spa config lock.
3820 */
3821 static l2arc_dev_t *
3822 l2arc_dev_get_next(void)
3823 {
3824 l2arc_dev_t *first, *next = NULL;
3825
3826 /*
3827 * Lock out the removal of spas (spa_namespace_lock), then removal
3828 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3829 * both locks will be dropped and a spa config lock held instead.
3830 */
3831 mutex_enter(&spa_namespace_lock);
3832 mutex_enter(&l2arc_dev_mtx);
3833
3834 /* if there are no vdevs, there is nothing to do */
3835 if (l2arc_ndev == 0)
3836 goto out;
3837
3838 first = NULL;
3839 next = l2arc_dev_last;
3840 do {
3841 /* loop around the list looking for a non-faulted vdev */
3842 if (next == NULL) {
3843 next = list_head(l2arc_dev_list);
3844 } else {
3845 next = list_next(l2arc_dev_list, next);
3846 if (next == NULL)
3847 next = list_head(l2arc_dev_list);
3848 }
3849
3850 /* if we have come back to the start, bail out */
3851 if (first == NULL)
3852 first = next;
3853 else if (next == first)
3854 break;
3855
3856 } while (vdev_is_dead(next->l2ad_vdev));
3857
3858 /* if we were unable to find any usable vdevs, return NULL */
3859 if (vdev_is_dead(next->l2ad_vdev))
3860 next = NULL;
3861
3862 l2arc_dev_last = next;
3863
3864 out:
3865 mutex_exit(&l2arc_dev_mtx);
3866
3867 /*
3868 * Grab the config lock to prevent the 'next' device from being
3869 * removed while we are writing to it.
3870 */
3871 if (next != NULL)
3872 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3873 mutex_exit(&spa_namespace_lock);
3874
3875 return (next);
3876 }
3877
3878 /*
3879 * Free buffers that were tagged for destruction.
3880 */
3881 static void
3882 l2arc_do_free_on_write(void)
3883 {
3884 list_t *buflist;
3885 l2arc_data_free_t *df, *df_prev;
3886
3887 mutex_enter(&l2arc_free_on_write_mtx);
3888 buflist = l2arc_free_on_write;
3889
3890 for (df = list_tail(buflist); df; df = df_prev) {
3891 df_prev = list_prev(buflist, df);
3892 ASSERT(df->l2df_data != NULL);
3893 ASSERT(df->l2df_func != NULL);
3894 df->l2df_func(df->l2df_data, df->l2df_size);
3895 list_remove(buflist, df);
3896 kmem_free(df, sizeof (l2arc_data_free_t));
3897 }
3898
3899 mutex_exit(&l2arc_free_on_write_mtx);
3900 }
3901
3902 /*
3903 * A write to a cache device has completed. Update all headers to allow
3904 * reads from these buffers to begin.
3905 */
3906 static void
3907 l2arc_write_done(zio_t *zio)
3908 {
3909 l2arc_write_callback_t *cb;
3910 l2arc_dev_t *dev;
3911 list_t *buflist;
3912 arc_buf_hdr_t *head, *ab, *ab_prev;
3913 l2arc_buf_hdr_t *abl2;
3914 kmutex_t *hash_lock;
3915
3916 cb = zio->io_private;
3917 ASSERT(cb != NULL);
3918 dev = cb->l2wcb_dev;
3919 ASSERT(dev != NULL);
3920 head = cb->l2wcb_head;
3921 ASSERT(head != NULL);
3922 buflist = dev->l2ad_buflist;
3923 ASSERT(buflist != NULL);
3924 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3925 l2arc_write_callback_t *, cb);
3926
3927 if (zio->io_error != 0)
3928 ARCSTAT_BUMP(arcstat_l2_writes_error);
3929
3930 mutex_enter(&l2arc_buflist_mtx);
3931
3932 /*
3933 * All writes completed, or an error was hit.
3934 */
3935 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3936 ab_prev = list_prev(buflist, ab);
3937
3938 hash_lock = HDR_LOCK(ab);
3939 if (!mutex_tryenter(hash_lock)) {
3940 /*
3941 * This buffer misses out. It may be in a stage
3942 * of eviction. Its ARC_L2_WRITING flag will be
3943 * left set, denying reads to this buffer.
3944 */
3945 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3946 continue;
3947 }
3948
3949 if (zio->io_error != 0) {
3950 /*
3951 * Error - drop L2ARC entry.
3952 */
3953 list_remove(buflist, ab);
3954 abl2 = ab->b_l2hdr;
3955 ab->b_l2hdr = NULL;
3956 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3957 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3958 }
3959
3960 /*
3961 * Allow ARC to begin reads to this L2ARC entry.
3962 */
3963 ab->b_flags &= ~ARC_L2_WRITING;
3964
3965 mutex_exit(hash_lock);
3966 }
3967
3968 atomic_inc_64(&l2arc_writes_done);
3969 list_remove(buflist, head);
3970 kmem_cache_free(hdr_cache, head);
3971 mutex_exit(&l2arc_buflist_mtx);
3972
3973 l2arc_do_free_on_write();
3974
3975 kmem_free(cb, sizeof (l2arc_write_callback_t));
3976 }
3977
3978 /*
3979 * A read to a cache device completed. Validate buffer contents before
3980 * handing over to the regular ARC routines.
3981 */
3982 static void
3983 l2arc_read_done(zio_t *zio)
3984 {
3985 l2arc_read_callback_t *cb;
3986 arc_buf_hdr_t *hdr;
3987 arc_buf_t *buf;
3988 kmutex_t *hash_lock;
3989 int equal;
3990
3991 ASSERT(zio->io_vd != NULL);
3992 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
3993
3994 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
3995
3996 cb = zio->io_private;
3997 ASSERT(cb != NULL);
3998 buf = cb->l2rcb_buf;
3999 ASSERT(buf != NULL);
4000
4001 hash_lock = HDR_LOCK(buf->b_hdr);
4002 mutex_enter(hash_lock);
4003 hdr = buf->b_hdr;
4004 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4005
4006 /*
4007 * Check this survived the L2ARC journey.
4008 */
4009 equal = arc_cksum_equal(buf);
4010 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4011 mutex_exit(hash_lock);
4012 zio->io_private = buf;
4013 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4014 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4015 arc_read_done(zio);
4016 } else {
4017 mutex_exit(hash_lock);
4018 /*
4019 * Buffer didn't survive caching. Increment stats and
4020 * reissue to the original storage device.
4021 */
4022 if (zio->io_error != 0) {
4023 ARCSTAT_BUMP(arcstat_l2_io_error);
4024 } else {
4025 zio->io_error = EIO;
4026 }
4027 if (!equal)
4028 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4029
4030 /*
4031 * If there's no waiter, issue an async i/o to the primary
4032 * storage now. If there *is* a waiter, the caller must
4033 * issue the i/o in a context where it's OK to block.
4034 */
4035 if (zio->io_waiter == NULL) {
4036 zio_t *pio = zio_unique_parent(zio);
4037
4038 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4039
4040 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4041 buf->b_data, zio->io_size, arc_read_done, buf,
4042 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4043 }
4044 }
4045
4046 kmem_free(cb, sizeof (l2arc_read_callback_t));
4047 }
4048
4049 /*
4050 * This is the list priority from which the L2ARC will search for pages to
4051 * cache. This is used within loops (0..3) to cycle through lists in the
4052 * desired order. This order can have a significant effect on cache
4053 * performance.
4054 *
4055 * Currently the metadata lists are hit first, MFU then MRU, followed by
4056 * the data lists. This function returns a locked list, and also returns
4057 * the lock pointer.
4058 */
4059 static list_t *
4060 l2arc_list_locked(int list_num, kmutex_t **lock)
4061 {
4062 list_t *list;
4063
4064 ASSERT(list_num >= 0 && list_num <= 3);
4065
4066 switch (list_num) {
4067 case 0:
4068 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4069 *lock = &arc_mfu->arcs_mtx;
4070 break;
4071 case 1:
4072 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4073 *lock = &arc_mru->arcs_mtx;
4074 break;
4075 case 2:
4076 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4077 *lock = &arc_mfu->arcs_mtx;
4078 break;
4079 case 3:
4080 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4081 *lock = &arc_mru->arcs_mtx;
4082 break;
4083 }
4084
4085 ASSERT(!(MUTEX_HELD(*lock)));
4086 mutex_enter(*lock);
4087 return (list);
4088 }
4089
4090 /*
4091 * Evict buffers from the device write hand to the distance specified in
4092 * bytes. This distance may span populated buffers, or it may span nothing.
4093 * This is clearing a region on the L2ARC device ready for writing.
4094 * If the 'all' boolean is set, every buffer is evicted.
4095 */
4096 static void
4097 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4098 {
4099 list_t *buflist;
4100 l2arc_buf_hdr_t *abl2;
4101 arc_buf_hdr_t *ab, *ab_prev;
4102 kmutex_t *hash_lock;
4103 uint64_t taddr;
4104
4105 buflist = dev->l2ad_buflist;
4106
4107 if (buflist == NULL)
4108 return;
4109
4110 if (!all && dev->l2ad_first) {
4111 /*
4112 * This is the first sweep through the device. There is
4113 * nothing to evict.
4114 */
4115 return;
4116 }
4117
4118 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4119 /*
4120 * When nearing the end of the device, evict to the end
4121 * before the device write hand jumps to the start.
4122 */
4123 taddr = dev->l2ad_end;
4124 } else {
4125 taddr = dev->l2ad_hand + distance;
4126 }
4127 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4128 uint64_t, taddr, boolean_t, all);
4129
4130 top:
4131 mutex_enter(&l2arc_buflist_mtx);
4132 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4133 ab_prev = list_prev(buflist, ab);
4134
4135 hash_lock = HDR_LOCK(ab);
4136 if (!mutex_tryenter(hash_lock)) {
4137 /*
4138 * Missed the hash lock. Retry.
4139 */
4140 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4141 mutex_exit(&l2arc_buflist_mtx);
4142 mutex_enter(hash_lock);
4143 mutex_exit(hash_lock);
4144 goto top;
4145 }
4146
4147 if (HDR_L2_WRITE_HEAD(ab)) {
4148 /*
4149 * We hit a write head node. Leave it for
4150 * l2arc_write_done().
4151 */
4152 list_remove(buflist, ab);
4153 mutex_exit(hash_lock);
4154 continue;
4155 }
4156
4157 if (!all && ab->b_l2hdr != NULL &&
4158 (ab->b_l2hdr->b_daddr > taddr ||
4159 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4160 /*
4161 * We've evicted to the target address,
4162 * or the end of the device.
4163 */
4164 mutex_exit(hash_lock);
4165 break;
4166 }
4167
4168 if (HDR_FREE_IN_PROGRESS(ab)) {
4169 /*
4170 * Already on the path to destruction.
4171 */
4172 mutex_exit(hash_lock);
4173 continue;
4174 }
4175
4176 if (ab->b_state == arc_l2c_only) {
4177 ASSERT(!HDR_L2_READING(ab));
4178 /*
4179 * This doesn't exist in the ARC. Destroy.
4180 * arc_hdr_destroy() will call list_remove()
4181 * and decrement arcstat_l2_size.
4182 */
4183 arc_change_state(arc_anon, ab, hash_lock);
4184 arc_hdr_destroy(ab);
4185 } else {
4186 /*
4187 * Invalidate issued or about to be issued
4188 * reads, since we may be about to write
4189 * over this location.
4190 */
4191 if (HDR_L2_READING(ab)) {
4192 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4193 ab->b_flags |= ARC_L2_EVICTED;
4194 }
4195
4196 /*
4197 * Tell ARC this no longer exists in L2ARC.
4198 */
4199 if (ab->b_l2hdr != NULL) {
4200 abl2 = ab->b_l2hdr;
4201 ab->b_l2hdr = NULL;
4202 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4203 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4204 }
4205 list_remove(buflist, ab);
4206
4207 /*
4208 * This may have been leftover after a
4209 * failed write.
4210 */
4211 ab->b_flags &= ~ARC_L2_WRITING;
4212 }
4213 mutex_exit(hash_lock);
4214 }
4215 mutex_exit(&l2arc_buflist_mtx);
4216
4217 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4218 dev->l2ad_evict = taddr;
4219 }
4220
4221 /*
4222 * Find and write ARC buffers to the L2ARC device.
4223 *
4224 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4225 * for reading until they have completed writing.
4226 */
4227 static uint64_t
4228 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4229 {
4230 arc_buf_hdr_t *ab, *ab_prev, *head;
4231 l2arc_buf_hdr_t *hdrl2;
4232 list_t *list;
4233 uint64_t passed_sz, write_sz, buf_sz, headroom;
4234 void *buf_data;
4235 kmutex_t *hash_lock, *list_lock;
4236 boolean_t have_lock, full;
4237 l2arc_write_callback_t *cb;
4238 zio_t *pio, *wzio;
4239 uint64_t guid = spa_guid(spa);
4240 int try;
4241
4242 ASSERT(dev->l2ad_vdev != NULL);
4243
4244 pio = NULL;
4245 write_sz = 0;
4246 full = B_FALSE;
4247 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4248 head->b_flags |= ARC_L2_WRITE_HEAD;
4249
4250 /*
4251 * Copy buffers for L2ARC writing.
4252 */
4253 mutex_enter(&l2arc_buflist_mtx);
4254 for (try = 0; try <= 3; try++) {
4255 list = l2arc_list_locked(try, &list_lock);
4256 passed_sz = 0;
4257
4258 /*
4259 * L2ARC fast warmup.
4260 *
4261 * Until the ARC is warm and starts to evict, read from the
4262 * head of the ARC lists rather than the tail.
4263 */
4264 headroom = target_sz * l2arc_headroom;
4265 if (arc_warm == B_FALSE)
4266 ab = list_head(list);
4267 else
4268 ab = list_tail(list);
4269
4270 for (; ab; ab = ab_prev) {
4271 if (arc_warm == B_FALSE)
4272 ab_prev = list_next(list, ab);
4273 else
4274 ab_prev = list_prev(list, ab);
4275
4276 hash_lock = HDR_LOCK(ab);
4277 have_lock = MUTEX_HELD(hash_lock);
4278 if (!have_lock && !mutex_tryenter(hash_lock)) {
4279 /*
4280 * Skip this buffer rather than waiting.
4281 */
4282 continue;
4283 }
4284
4285 passed_sz += ab->b_size;
4286 if (passed_sz > headroom) {
4287 /*
4288 * Searched too far.
4289 */
4290 mutex_exit(hash_lock);
4291 break;
4292 }
4293
4294 if (!l2arc_write_eligible(guid, ab)) {
4295 mutex_exit(hash_lock);
4296 continue;
4297 }
4298
4299 if ((write_sz + ab->b_size) > target_sz) {
4300 full = B_TRUE;
4301 mutex_exit(hash_lock);
4302 break;
4303 }
4304
4305 if (pio == NULL) {
4306 /*
4307 * Insert a dummy header on the buflist so
4308 * l2arc_write_done() can find where the
4309 * write buffers begin without searching.
4310 */
4311 list_insert_head(dev->l2ad_buflist, head);
4312
4313 cb = kmem_alloc(
4314 sizeof (l2arc_write_callback_t), KM_SLEEP);
4315 cb->l2wcb_dev = dev;
4316 cb->l2wcb_head = head;
4317 pio = zio_root(spa, l2arc_write_done, cb,
4318 ZIO_FLAG_CANFAIL);
4319 }
4320
4321 /*
4322 * Create and add a new L2ARC header.
4323 */
4324 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4325 hdrl2->b_dev = dev;
4326 hdrl2->b_daddr = dev->l2ad_hand;
4327
4328 ab->b_flags |= ARC_L2_WRITING;
4329 ab->b_l2hdr = hdrl2;
4330 list_insert_head(dev->l2ad_buflist, ab);
4331 buf_data = ab->b_buf->b_data;
4332 buf_sz = ab->b_size;
4333
4334 /*
4335 * Compute and store the buffer cksum before
4336 * writing. On debug the cksum is verified first.
4337 */
4338 arc_cksum_verify(ab->b_buf);
4339 arc_cksum_compute(ab->b_buf, B_TRUE);
4340
4341 mutex_exit(hash_lock);
4342
4343 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4344 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4345 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4346 ZIO_FLAG_CANFAIL, B_FALSE);
4347
4348 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4349 zio_t *, wzio);
4350 (void) zio_nowait(wzio);
4351
4352 /*
4353 * Keep the clock hand suitably device-aligned.
4354 */
4355 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4356
4357 write_sz += buf_sz;
4358 dev->l2ad_hand += buf_sz;
4359 }
4360
4361 mutex_exit(list_lock);
4362
4363 if (full == B_TRUE)
4364 break;
4365 }
4366 mutex_exit(&l2arc_buflist_mtx);
4367
4368 if (pio == NULL) {
4369 ASSERT3U(write_sz, ==, 0);
4370 kmem_cache_free(hdr_cache, head);
4371 return (0);
4372 }
4373
4374 ASSERT3U(write_sz, <=, target_sz);
4375 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4376 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4377 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4378 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4379
4380 /*
4381 * Bump device hand to the device start if it is approaching the end.
4382 * l2arc_evict() will already have evicted ahead for this case.
4383 */
4384 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4385 vdev_space_update(dev->l2ad_vdev,
4386 dev->l2ad_end - dev->l2ad_hand, 0, 0);
4387 dev->l2ad_hand = dev->l2ad_start;
4388 dev->l2ad_evict = dev->l2ad_start;
4389 dev->l2ad_first = B_FALSE;
4390 }
4391
4392 dev->l2ad_writing = B_TRUE;
4393 (void) zio_wait(pio);
4394 dev->l2ad_writing = B_FALSE;
4395
4396 return (write_sz);
4397 }
4398
4399 /*
4400 * This thread feeds the L2ARC at regular intervals. This is the beating
4401 * heart of the L2ARC.
4402 */
4403 static void
4404 l2arc_feed_thread(void)
4405 {
4406 callb_cpr_t cpr;
4407 l2arc_dev_t *dev;
4408 spa_t *spa;
4409 uint64_t size, wrote;
4410 clock_t begin, next = ddi_get_lbolt();
4411
4412 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4413
4414 mutex_enter(&l2arc_feed_thr_lock);
4415
4416 while (l2arc_thread_exit == 0) {
4417 CALLB_CPR_SAFE_BEGIN(&cpr);
4418 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4419 next);
4420 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4421 next = ddi_get_lbolt() + hz;
4422
4423 /*
4424 * Quick check for L2ARC devices.
4425 */
4426 mutex_enter(&l2arc_dev_mtx);
4427 if (l2arc_ndev == 0) {
4428 mutex_exit(&l2arc_dev_mtx);
4429 continue;
4430 }
4431 mutex_exit(&l2arc_dev_mtx);
4432 begin = ddi_get_lbolt();
4433
4434 /*
4435 * This selects the next l2arc device to write to, and in
4436 * doing so the next spa to feed from: dev->l2ad_spa. This
4437 * will return NULL if there are now no l2arc devices or if
4438 * they are all faulted.
4439 *
4440 * If a device is returned, its spa's config lock is also
4441 * held to prevent device removal. l2arc_dev_get_next()
4442 * will grab and release l2arc_dev_mtx.
4443 */
4444 if ((dev = l2arc_dev_get_next()) == NULL)
4445 continue;
4446
4447 spa = dev->l2ad_spa;
4448 ASSERT(spa != NULL);
4449
4450 /*
4451 * If the pool is read-only then force the feed thread to
4452 * sleep a little longer.
4453 */
4454 if (!spa_writeable(spa)) {
4455 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
4456 spa_config_exit(spa, SCL_L2ARC, dev);
4457 continue;
4458 }
4459
4460 /*
4461 * Avoid contributing to memory pressure.
4462 */
4463 if (arc_reclaim_needed()) {
4464 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4465 spa_config_exit(spa, SCL_L2ARC, dev);
4466 continue;
4467 }
4468
4469 ARCSTAT_BUMP(arcstat_l2_feeds);
4470
4471 size = l2arc_write_size(dev);
4472
4473 /*
4474 * Evict L2ARC buffers that will be overwritten.
4475 */
4476 l2arc_evict(dev, size, B_FALSE);
4477
4478 /*
4479 * Write ARC buffers.
4480 */
4481 wrote = l2arc_write_buffers(spa, dev, size);
4482
4483 /*
4484 * Calculate interval between writes.
4485 */
4486 next = l2arc_write_interval(begin, size, wrote);
4487 spa_config_exit(spa, SCL_L2ARC, dev);
4488 }
4489
4490 l2arc_thread_exit = 0;
4491 cv_broadcast(&l2arc_feed_thr_cv);
4492 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
4493 thread_exit();
4494 }
4495
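/*
 * Determine whether the given vdev is currently registered as an L2ARC
 * device (i.e. appears on the global l2arc_dev_list).
 */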
4496 boolean_t
4497 l2arc_vdev_present(vdev_t *vd)
4498 {
4499 l2arc_dev_t *dev;
4500
4501 mutex_enter(&l2arc_dev_mtx);
4502 for (dev = list_head(l2arc_dev_list); dev != NULL;
4503 dev = list_next(l2arc_dev_list, dev)) {
4504 if (dev->l2ad_vdev == vd)
4505 break;
4506 }
4507 mutex_exit(&l2arc_dev_mtx);
4508
4509 return (dev != NULL);
4510 }
4511
4512 /*
4513 * Add a vdev for use by the L2ARC. By this point the spa has already
4514 * validated the vdev and opened it.
4515 */
4516 void
4517 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
4518 {
4519 l2arc_dev_t *adddev;
4520
4521 ASSERT(!l2arc_vdev_present(vd));
4522
4523 /*
4524 * Create a new l2arc device entry.
4525 */
4526 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4527 adddev->l2ad_spa = spa;
4528 adddev->l2ad_vdev = vd;
4529 adddev->l2ad_write = l2arc_write_max;
4530 adddev->l2ad_boost = l2arc_write_boost;
4531 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
4532 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
4533 adddev->l2ad_hand = adddev->l2ad_start;
4534 adddev->l2ad_evict = adddev->l2ad_start;
4535 adddev->l2ad_first = B_TRUE;
4536 adddev->l2ad_writing = B_FALSE;
4537 ASSERT3U(adddev->l2ad_write, >, 0);
4538
4539 /*
4540 * This is a list of all ARC buffers that are still valid on the
4541 * device.
4542 */
4543 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4544 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4545 offsetof(arc_buf_hdr_t, b_l2node));
4546
4547 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
4548
4549 /*
4550 * Add device to global list
4551 */
4552 mutex_enter(&l2arc_dev_mtx);
4553 list_insert_head(l2arc_dev_list, adddev);
4554 atomic_inc_64(&l2arc_ndev);
4555 mutex_exit(&l2arc_dev_mtx);
4556 }
4557
4558 /*
4559 * Remove a vdev from the L2ARC.
4560 */
4561 void
4562 l2arc_remove_vdev(vdev_t *vd)
4563 {
4564 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4565
4566 /*
4567 * Find the device by vdev
4568 */
4569 mutex_enter(&l2arc_dev_mtx);
4570 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4571 nextdev = list_next(l2arc_dev_list, dev);
4572 if (vd == dev->l2ad_vdev) {
4573 remdev = dev;
4574 break;
4575 }
4576 }
4577 ASSERT(remdev != NULL);
4578
4579 /*
4580 * Remove device from global list
4581 */
4582 list_remove(l2arc_dev_list, remdev);
4583 l2arc_dev_last = NULL; /* may have been invalidated */
4584 atomic_dec_64(&l2arc_ndev);
4585 mutex_exit(&l2arc_dev_mtx);
4586
4587 /*
4588 * Clear all buflists and ARC references. L2ARC device flush.
4589 */
4590 l2arc_evict(remdev, 0, B_TRUE);
4591 list_destroy(remdev->l2ad_buflist);
4592 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4593 kmem_free(remdev, sizeof (l2arc_dev_t));
4594 }
4595
4596 void
4597 l2arc_init(void)
4598 {
4599 l2arc_thread_exit = 0;
4600 l2arc_ndev = 0;
4601 l2arc_writes_sent = 0;
4602 l2arc_writes_done = 0;
4603
4604 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4605 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4606 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4607 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4608 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4609
4610 l2arc_dev_list = &L2ARC_dev_list;
4611 l2arc_free_on_write = &L2ARC_free_on_write;
4612 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4613 offsetof(l2arc_dev_t, l2ad_node));
4614 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4615 offsetof(l2arc_data_free_t, l2df_list_node));
4616 }
4617
4618 void
4619 l2arc_fini(void)
4620 {
4621 /*
4622 * This is called from dmu_fini(), which is called from spa_fini().
4623 * Because of this, we can assume that all l2arc devices have
4624 * already been removed when the pools themselves were removed.
4625 */
4626
4627 l2arc_do_free_on_write();
4628
4629 mutex_destroy(&l2arc_feed_thr_lock);
4630 cv_destroy(&l2arc_feed_thr_cv);
4631 mutex_destroy(&l2arc_dev_mtx);
4632 mutex_destroy(&l2arc_buflist_mtx);
4633 mutex_destroy(&l2arc_free_on_write_mtx);
4634
4635 list_destroy(l2arc_dev_list);
4636 list_destroy(l2arc_free_on_write);
4637 }
4638
4639 void
4640 l2arc_start(void)
4641 {
4642 if (!(spa_mode_global & FWRITE))
4643 return;
4644
4645 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4646 TS_RUN, minclsyspri);
4647 }
4648
4649 void
4650 l2arc_stop(void)
4651 {
4652 if (!(spa_mode_global & FWRITE))
4653 return;
4654
4655 mutex_enter(&l2arc_feed_thr_lock);
4656 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
4657 l2arc_thread_exit = 1;
4658 while (l2arc_thread_exit != 0)
4659 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4660 mutex_exit(&l2arc_feed_thr_lock);
4661 }