/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
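
/*
 * Illustrative sketch (annotation, not part of the original source) of
 * point 3 above: evict a set of variable-sized evictable blocks whose
 * total approximates the space the new block needs. The names blk_t,
 * evictable_tail() and evict_block() are hypothetical stand-ins for the
 * real machinery in arc_evict() below.
 */
#if 0
static uint64_t
evict_approximate(list_t *list, uint64_t wanted)
{
	uint64_t freed = 0;
	blk_t *b;

	/* walk from the "lowest" end of the list, as described above */
	while (freed < wanted && (b = evictable_tail(list)) != NULL) {
		freed += b->size;	/* blocks range from 512B to 128KB */
		evict_block(b);
	}
	return (freed);
}
#endif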

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
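
/*
 * Illustrative fragment (annotation, not part of the original source) of
 * the lock-ordering rule above: while an arc list lock is held, a hash
 * table lock may only be taken with mutex_tryenter(), since other paths
 * acquire the locks in the opposite order.
 */
#if 0
	mutex_enter(&state->arcs_mtx);		/* arc list lock first */
	if (mutex_tryenter(hash_lock)) {	/* never mutex_enter() here */
		/* ... move or evict the buffer ... */
		mutex_exit(hash_lock);
	} else {
		/* hash lock is busy; skip this buffer to avoid deadlock */
	}
	mutex_exit(&state->arcs_mtx);
#endif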

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <zfs_fletcher.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are the
 * only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) { \
	uint64_t m; \
	while ((val) > (m = arc_stats.stat.value.ui64) && \
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
		continue; \
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

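/*
 * Illustrative use of ARCSTAT_MAX() (annotation, not part of the original
 * source): it is a lock-free "store maximum" built on compare-and-swap.
 * The loop re-reads the statistic and retries only when another thread
 * raised it between the read and the atomic_cas_64().
 */
#if 0
	uint64_t chain_len = 7;
	/* raise arcstat_hash_chain_max to 7 unless it is already larger */
	ARCSTAT_MAX(arcstat_hash_chain_max, chain_len);
#endif
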
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) { \
		if (cond2) { \
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else { \
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		} \
	} else { \
		if (cond2) { \
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else { \
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		} \
	}
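
/*
 * Example (annotation, not part of the original source): the call used
 * for ARC hits in arc_buf_add_ref() below, shown here, expands for a
 * demand hit on a metadata buffer (cond1 true, cond2 false) into a
 * single ARCSTAT_BUMP(arcstat_demand_metadata_hits).
 */
#if 0
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
#endif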

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state) \
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
	(state) == arc_l2c_only)

/*
 * Private ARC flags. These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	64

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}
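
/*
 * Illustrative fragment (annotation, not part of the original source):
 * how the hash above selects both a chain and its padded lock through
 * the BUF_HASH_INDEX()/BUF_HASH_LOCK() macros defined earlier.
 */
#if 0
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);	/* hash & ht_mask */
	kmutex_t *hl = BUF_HASH_LOCK(idx);	/* one of BUF_LOCKS mutexes */

	mutex_enter(hl);
	/* ... walk the chain at buf_hash_table.ht_table[idx] ... */
	mutex_exit(hl);
#endif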

#define	BUF_EMPTY(buf) \
	((buf)->b_dva.dva_word[0] == 0 && \
	(buf)->b_dva.dva_word[1] == 0 && \
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf) \
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}
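
/*
 * Illustrative insert-or-find fragment (annotation, not part of the
 * original source): on return from buf_hash_insert() the hash lock is
 * held either way; a non-NULL result means an equal header already
 * existed and the new one was not inserted.
 */
#if 0
	kmutex_t *hash_lock;
	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);

	if (exists != NULL) {
		/* lost the race: use "exists" and discard "hdr" */
	}
	mutex_exit(hash_lock);
#endif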

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&buf->b_data_lock, NULL, RW_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	rw_destroy(&buf->b_data_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size. The table will take up
	 * totalmem * sizeof (void *) / 64K (e.g. 128KB/GB with 8-byte
	 * pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}
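
/*
 * Worked example (annotation, not part of the original source) of the
 * sizing loop in buf_init() above, for a machine with 4 GB of physical
 * memory.
 */
#if 0
	uint64_t hsize = 1ULL << 12;
	uint64_t mem = 4ULL << 30;		/* physmem * PAGESIZE */

	while (hsize * 65536 < mem)
		hsize <<= 1;			/* ends at 1ULL << 16 */
	/* table: hsize * sizeof (void *) == 512KB, i.e. 128KB per GB */
#endif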

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);
	mutex_exit(hash_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);
}
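
/*
 * Illustrative lifecycle (annotation, not part of the original source),
 * active only when ZFS_DEBUG_MODIFY is set in zfs_flags: freeze a buffer
 * once its contents must no longer change, and thaw it (while it is
 * anonymous) before a legitimate modification, so arc_cksum_verify()
 * does not panic.
 */
#if 0
	arc_buf_freeze(buf);	/* record the fletcher-2 checksum */
	/* ... reads may verify against the checksum ... */
	arc_buf_thaw(buf);	/* discard it before modifying b_data */
#endif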

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state. The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}
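
/*
 * Illustrative protocol (annotation, not part of the original source):
 * a loaned buffer is handed to a consumer and must be returned before
 * the DMU may use or free it, as the comment above arc_loan_buf() says;
 * arc_loaned_bytes tracks the outstanding total.
 */
#if 0
	arc_buf_t *abuf = arc_loan_buf(spa, size);

	/* ... caller fills abuf->b_data ... */
	arc_return_buf(abuf, tag);	/* ownership back from "onloan" */
#endif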

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted. Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}
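
/*
 * Illustrative caller pattern (annotation, not part of the original
 * source): per the comment in arc_buf_add_ref() above, the add is a
 * silent no-op on an evicted buffer, so the caller decides success by
 * re-checking b_data.
 */
#if 0
	arc_buf_add_ref(buf, tag);
	if (buf->b_data == NULL) {
		/* buffer was evicted; caller must re-read the block */
	}
#endif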

/*
 * Free the arc data buffer. If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;

	if (l2hdr != NULL) {
		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
		/*
		 * To prevent arc_free() and l2arc_evict() from
		 * attempting to free the same buffer at the same time,
		 * a FREE_IN_PROGRESS flag is given to arc_free() to
		 * give it priority. l2arc_evict() can't destroy this
		 * header while we are waiting on l2arc_buflist_mtx.
		 *
		 * The hdr may be removed from l2ad_buflist before we
		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
		 */
		if (!buflist_held) {
			mutex_enter(&l2arc_buflist_mtx);
			l2hdr = hdr->b_l2hdr;
		}

		if (l2hdr != NULL) {
			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
			if (hdr->b_state == arc_l2c_only)
				l2arc_hdr_stat_remove();
			hdr->b_l2hdr = NULL;
		}

		if (!buflist_held)
			mutex_exit(&l2arc_buflist_mtx);
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		buf_discard_identity(hdr);
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			mutex_enter(&buf->b_evict_lock);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&buf->b_evict_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	if (hdr->b_thawed) {
		kmem_free(hdr->b_thawed, 1);
		hdr->b_thawed = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		hdr = buf->b_hdr;
		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1) {
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			ASSERT(buf == hdr->b_buf);
			ASSERT(buf->b_efunc == NULL);
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		}
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write. Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			arc_hdr_destroy(hdr);
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		ASSERT(hdr->b_datacnt == 1);
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		ASSERT(buf->b_efunc == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes. Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort". It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
 */
static void *
arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    ddi_get_lbolt() - ab->b_arc_access <
		    arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!mutex_tryenter(&buf->b_evict_lock)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					mutex_exit(&buf->b_evict_lock);
				} else {
					mutex_exit(&buf->b_evict_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}

			if (ab->b_l2hdr) {
				ARCSTAT_INCR(arcstat_evict_l2_cached,
				    ab->b_size);
			} else {
				if (l2arc_write_eligible(ab->b_spa, ab)) {
					ARCSTAT_INCR(arcstat_evict_l2_eligible,
					    ab->b_size);
				} else {
					ARCSTAT_INCR(
					    arcstat_evict_l2_ineligible,
					    ab->b_size);
				}
			}

			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, so make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}
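
/*
 * Illustrative use of the recycle flag (annotation, not part of the
 * original source): a caller making room for a new same-sized buffer,
 * in the style of arc_get_data_buf(), can steal the victim's data block
 * instead of freeing and reallocating it. Passing 0 for spa means any
 * pool's buffers are candidates.
 */
#if 0
	void *stolen = arc_evict(state, 0, size, TRUE, type);

	if (stolen != NULL)
		buf->b_data = stolen;	/* recycled allocation */
#endif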
1744
1745 /*
1746 * Remove buffers from list until we've removed the specified number of
1747 * bytes. Destroy the buffers that are removed.
1748 */
1749 static void
1750 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
1751 {
1752 arc_buf_hdr_t *ab, *ab_prev;
1753 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1754 kmutex_t *hash_lock;
1755 uint64_t bytes_deleted = 0;
1756 uint64_t bufs_skipped = 0;
1757
1758 ASSERT(GHOST_STATE(state));
1759 top:
1760 mutex_enter(&state->arcs_mtx);
1761 for (ab = list_tail(list); ab; ab = ab_prev) {
1762 ab_prev = list_prev(list, ab);
1763 if (spa && ab->b_spa != spa)
1764 continue;
1765 hash_lock = HDR_LOCK(ab);
1766 /* caller may be trying to modify this buffer, skip it */
1767 if (MUTEX_HELD(hash_lock))
1768 continue;
1769 if (mutex_tryenter(hash_lock)) {
1770 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1771 ASSERT(ab->b_buf == NULL);
1772 ARCSTAT_BUMP(arcstat_deleted);
1773 bytes_deleted += ab->b_size;
1774
1775 if (ab->b_l2hdr != NULL) {
1776 /*
1777 * This buffer is cached on the 2nd Level ARC;
1778 * don't destroy the header.
1779 */
1780 arc_change_state(arc_l2c_only, ab, hash_lock);
1781 mutex_exit(hash_lock);
1782 } else {
1783 arc_change_state(arc_anon, ab, hash_lock);
1784 mutex_exit(hash_lock);
1785 arc_hdr_destroy(ab);
1786 }
1787
1788 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1789 if (bytes >= 0 && bytes_deleted >= bytes)
1790 break;
1791 } else {
1792 if (bytes < 0) {
1793 mutex_exit(&state->arcs_mtx);
1794 mutex_enter(hash_lock);
1795 mutex_exit(hash_lock);
1796 goto top;
1797 }
1798 bufs_skipped += 1;
1799 }
1800 }
1801 mutex_exit(&state->arcs_mtx);
1802
1803 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1804 (bytes < 0 || bytes_deleted < bytes)) {
1805 list = &state->arcs_list[ARC_BUFC_METADATA];
1806 goto top;
1807 }
1808
1809 if (bufs_skipped) {
1810 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1811 ASSERT(bytes >= 0);
1812 }
1813
1814 if (bytes_deleted < bytes)
1815 dprintf("only deleted %lld bytes from %p",
1816 (longlong_t)bytes_deleted, state);
1817 }
1818
1819 static void
1820 arc_adjust(void)
1821 {
1822 int64_t adjustment, delta;
1823
1824 /*
1825 * Adjust MRU size
1826 */
1827
1828 adjustment = MIN(arc_size - arc_c,
1829 arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - arc_p);
1830
1831 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1832 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
1833 (void) arc_evict(arc_mru, NULL, delta, FALSE, ARC_BUFC_DATA);
1834 adjustment -= delta;
1835 }
1836
1837 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1838 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
1839 (void) arc_evict(arc_mru, NULL, delta, FALSE,
1840 ARC_BUFC_METADATA);
1841 }
1842
1843 /*
1844 * Adjust MFU size
1845 */
1846
1847 adjustment = arc_size - arc_c;
1848
1849 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1850 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
1851 (void) arc_evict(arc_mfu, NULL, delta, FALSE, ARC_BUFC_DATA);
1852 adjustment -= delta;
1853 }
1854
1855 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1856 int64_t delta = MIN(adjustment,
1857 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
1858 (void) arc_evict(arc_mfu, NULL, delta, FALSE,
1859 ARC_BUFC_METADATA);
1860 }
1861
1862 /*
1863 * Adjust ghost lists
1864 */
1865
1866 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
1867
1868 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
1869 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
1870 arc_evict_ghost(arc_mru_ghost, NULL, delta);
1871 }
1872
1873 adjustment =
1874 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
1875
1876 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
1877 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
1878 arc_evict_ghost(arc_mfu_ghost, NULL, delta);
1879 }
1880 }
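
/*
 * A worked example of the MRU adjustment above (illustrative numbers
 * only): suppose arc_c = 1000, arc_size = 1100, arc_p = 400, and
 * arc_anon + arc_mru + arc_meta_used = 500.  Then
 *
 *	adjustment = MIN(arc_size - arc_c,
 *	    anon + mru + meta_used - arc_p)
 *	           = MIN(100, 100) = 100
 *
 * so up to 100 bytes are evicted from the MRU lists, data first and
 * then metadata.  The MFU pass that follows evicts whatever portion of
 * (arc_size - arc_c) remains after that.
 */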
1881
1882 static void
1883 arc_do_user_evicts(void)
1884 {
1885 mutex_enter(&arc_eviction_mtx);
1886 while (arc_eviction_list != NULL) {
1887 arc_buf_t *buf = arc_eviction_list;
1888 arc_eviction_list = buf->b_next;
1889 mutex_enter(&buf->b_evict_lock);
1890 buf->b_hdr = NULL;
1891 mutex_exit(&buf->b_evict_lock);
1892 mutex_exit(&arc_eviction_mtx);
1893
1894 if (buf->b_efunc != NULL)
1895 VERIFY(buf->b_efunc(buf) == 0);
1896
1897 buf->b_efunc = NULL;
1898 buf->b_private = NULL;
1899 kmem_cache_free(buf_cache, buf);
1900 mutex_enter(&arc_eviction_mtx);
1901 }
1902 mutex_exit(&arc_eviction_mtx);
1903 }
1904
1905 /*
1906 * Flush all *evictable* data from the cache for the given spa.
1907 * NOTE: this will not touch "active" (i.e. referenced) data.
1908 */
1909 void
1910 arc_flush(spa_t *spa)
1911 {
1912 uint64_t guid = 0;
1913
1914 if (spa)
1915 guid = spa_guid(spa);
1916
1917 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1918 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
1919 if (spa)
1920 break;
1921 }
1922 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1923 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
1924 if (spa)
1925 break;
1926 }
1927 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1928 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
1929 if (spa)
1930 break;
1931 }
1932 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1933 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
1934 if (spa)
1935 break;
1936 }
1937
1938 arc_evict_ghost(arc_mru_ghost, guid, -1);
1939 arc_evict_ghost(arc_mfu_ghost, guid, -1);
1940
1941 mutex_enter(&arc_reclaim_thr_lock);
1942 arc_do_user_evicts();
1943 mutex_exit(&arc_reclaim_thr_lock);
1944 ASSERT(spa || arc_eviction_list == NULL);
1945 }
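
/*
 * Usage sketch (illustrative): a non-NULL spa flushes only that pool's
 * evictable buffers and scans each list once, while a NULL spa loops
 * until every list is empty:
 *
 *	arc_flush(spa);		flush one pool's evictable buffers
 *	arc_flush(NULL);	drain the whole cache (see arc_fini())
 */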
1946
1947 void
1948 arc_shrink(void)
1949 {
1950 if (arc_c > arc_c_min) {
1951 uint64_t to_free;
1952
1953 #ifdef _KERNEL
1954 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1955 #else
1956 to_free = arc_c >> arc_shrink_shift;
1957 #endif
1958 if (arc_c > arc_c_min + to_free)
1959 atomic_add_64(&arc_c, -to_free);
1960 else
1961 arc_c = arc_c_min;
1962
1963 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1964 if (arc_c > arc_size)
1965 arc_c = MAX(arc_size, arc_c_min);
1966 if (arc_p > arc_c)
1967 arc_p = (arc_c >> 1);
1968 ASSERT(arc_c >= arc_c_min);
1969 ASSERT((int64_t)arc_p >= 0);
1970 }
1971
1972 if (arc_size > arc_c)
1973 arc_adjust();
1974 }
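
/*
 * A worked example of the shrink arithmetic (illustrative; assumes
 * arc_shrink_shift = 5, its usual default, though it is tunable via
 * zfs_arc_shrink_shift): for arc_c = 1 GB and arc_c_min = 64 MB,
 *
 *	to_free = 1 GB >> 5 = 32 MB	(or ptob(needfree), if larger)
 *	arc_c   = 1 GB - 32 MB = 992 MB
 *	arc_p   = arc_p - (arc_p >> 5)
 *
 * arc_c never drops below arc_c_min, and arc_adjust() then evicts any
 * excess of arc_size over the new arc_c.
 */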
1975
1976 static int
1977 arc_reclaim_needed(void)
1978 {
1979 uint64_t extra;
1980
1981 #ifdef _KERNEL
1982
1983 if (needfree)
1984 return (1);
1985
1986 /*
1987 * take 'desfree' extra pages, so we reclaim sooner rather than later
1988 */
1989 extra = desfree;
1990
1991 /*
1992 * check that we're out of range of the pageout scanner. It starts to
1993 * schedule paging if freemem is less than lotsfree plus needfree.
1994 * lotsfree is the high-water mark for pageout, and needfree is the
1995 * number of needed free pages. We add extra pages here to make sure
1996 * the scanner doesn't start up while we're freeing memory.
1997 */
1998 if (freemem < lotsfree + needfree + extra)
1999 return (1);
2000
2001 /*
2002 * check to make sure that swapfs has enough space so that anon
2003 * reservations can still succeed. anon_resvmem() checks that
2004 * availrmem is greater than swapfs_minfree plus the number of reserved
2005 * swap pages. We also add a bit extra here just to prevent
2006 * circumstances from getting really dire.
2007 */
2008 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2009 return (1);
2010
2011 #if defined(__i386)
2012 /*
2013 * If we're on an i386 platform, it's possible that we'll exhaust the
2014 * kernel heap space before we ever run out of available physical
2015 * memory. Most checks of the size of the heap_area compare against
2016 * tune.t_minarmem, which is the minimum available real memory that we
2017 * can have in the system. However, this is generally fixed at 25 pages
2018 * which is so low that it's useless. In this comparison, we seek to
2019 * calculate the total heap size, and reclaim if more than 3/4 of the
2020 * heap is allocated (or, in the calculation, if less than 1/4 is
2021 * free).
2022 */
2023 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2024 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2025 return (1);
2026 #endif
2027
2028 #else
2029 if (spa_get_random(100) == 0)
2030 return (1);
2031 #endif
2032 return (0);
2033 }
2034
2035 static void
2036 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2037 {
2038 size_t i;
2039 kmem_cache_t *prev_cache = NULL;
2040 kmem_cache_t *prev_data_cache = NULL;
2041 extern kmem_cache_t *zio_buf_cache[];
2042 extern kmem_cache_t *zio_data_buf_cache[];
2043
2044 #ifdef _KERNEL
2045 if (arc_meta_used >= arc_meta_limit) {
2046 /*
2047 * We are exceeding our meta-data cache limit.
2048 * Purge some DNLC entries to release holds on meta-data.
2049 */
2050 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2051 }
2052 #if defined(__i386)
2053 /*
2054 * Reclaim unused memory from all kmem caches.
2055 */
2056 kmem_reap();
2057 #endif
2058 #endif
2059
2060 /*
2061 * An aggressive reclamation will shrink the cache size as well as
2062 * reap free buffers from the arc kmem caches.
2063 */
2064 if (strat == ARC_RECLAIM_AGGR)
2065 arc_shrink();
2066
2067 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2068 if (zio_buf_cache[i] != prev_cache) {
2069 prev_cache = zio_buf_cache[i];
2070 kmem_cache_reap_now(zio_buf_cache[i]);
2071 }
2072 if (zio_data_buf_cache[i] != prev_data_cache) {
2073 prev_data_cache = zio_data_buf_cache[i];
2074 kmem_cache_reap_now(zio_data_buf_cache[i]);
2075 }
2076 }
2077 kmem_cache_reap_now(buf_cache);
2078 kmem_cache_reap_now(hdr_cache);
2079 }
2080
2081 static void
2082 arc_reclaim_thread(void)
2083 {
2084 clock_t growtime = 0;
2085 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2086 callb_cpr_t cpr;
2087
2088 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2089
2090 mutex_enter(&arc_reclaim_thr_lock);
2091 while (arc_thread_exit == 0) {
2092 if (arc_reclaim_needed()) {
2093
2094 if (arc_no_grow) {
2095 if (last_reclaim == ARC_RECLAIM_CONS) {
2096 last_reclaim = ARC_RECLAIM_AGGR;
2097 } else {
2098 last_reclaim = ARC_RECLAIM_CONS;
2099 }
2100 } else {
2101 arc_no_grow = TRUE;
2102 last_reclaim = ARC_RECLAIM_AGGR;
2103 membar_producer();
2104 }
2105
2106 /* reset the growth delay for every reclaim */
2107 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2108
2109 arc_kmem_reap_now(last_reclaim);
2110 arc_warm = B_TRUE;
2111
2112 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2113 arc_no_grow = FALSE;
2114 }
2115
2116 if (2 * arc_c < arc_size +
2117 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
2118 arc_adjust();
2119
2120 if (arc_eviction_list != NULL)
2121 arc_do_user_evicts();
2122
2123 /* block until needed, or one second, whichever is shorter */
2124 CALLB_CPR_SAFE_BEGIN(&cpr);
2125 (void) cv_timedwait(&arc_reclaim_thr_cv,
2126 &arc_reclaim_thr_lock, (ddi_get_lbolt() + hz));
2127 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2128 }
2129
2130 arc_thread_exit = 0;
2131 cv_broadcast(&arc_reclaim_thr_cv);
2132 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2133 thread_exit();
2134 }
2135
2136 /*
2137 * Adapt arc info given the number of bytes we are trying to add and
2138 * the state that we are coming from. This function is only called
2139 * when we are adding new content to the cache.
2140 */
2141 static void
2142 arc_adapt(int bytes, arc_state_t *state)
2143 {
2144 int mult;
2145 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2146
2147 if (state == arc_l2c_only)
2148 return;
2149
2150 ASSERT(bytes > 0);
2151 /*
2152 * Adapt the target size of the MRU list:
2153 * - if we just hit in the MRU ghost list, then increase
2154 * the target size of the MRU list.
2155 * - if we just hit in the MFU ghost list, then increase
2156 * the target size of the MFU list by decreasing the
2157 * target size of the MRU list.
2158 */
2159 if (state == arc_mru_ghost) {
2160 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2161 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2162
2163 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2164 } else if (state == arc_mfu_ghost) {
2165 uint64_t delta;
2166
2167 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2168 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2169
2170 delta = MIN(bytes * mult, arc_p);
2171 arc_p = MAX(arc_p_min, arc_p - delta);
2172 }
2173 ASSERT((int64_t)arc_p >= 0);
2174
2175 if (arc_reclaim_needed()) {
2176 cv_signal(&arc_reclaim_thr_cv);
2177 return;
2178 }
2179
2180 if (arc_no_grow)
2181 return;
2182
2183 if (arc_c >= arc_c_max)
2184 return;
2185
2186 /*
2187 * If we're within (2 * maxblocksize) bytes of the target
2188 * cache size, increment the target cache size
2189 */
2190 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2191 atomic_add_64(&arc_c, (int64_t)bytes);
2192 if (arc_c > arc_c_max)
2193 arc_c = arc_c_max;
2194 else if (state == arc_anon)
2195 atomic_add_64(&arc_p, (int64_t)bytes);
2196 if (arc_p > arc_c)
2197 arc_p = arc_c;
2198 }
2199 ASSERT((int64_t)arc_p >= 0);
2200 }
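
/*
 * A worked example of the ghost-hit adaptation above (illustrative
 * numbers): on a hit in the MRU ghost list with bytes = 8K, if the MFU
 * ghost list holds 4x as much as the MRU ghost list then mult = 4, so
 *
 *	arc_p = MIN(arc_c - arc_p_min, arc_p + 8K * 4)
 *
 * i.e. arc_p grows by 32K, capped so that at least arc_p_min is left
 * for the MFU side.  A hit in the MFU ghost list moves arc_p the other
 * way by the symmetric amount, floored at arc_p_min.
 */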
2201
2202 /*
2203 * Check if the cache has reached its limits and eviction is required
2204 * prior to insert.
2205 */
2206 static int
2207 arc_evict_needed(arc_buf_contents_t type)
2208 {
2209 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2210 return (1);
2211
2212 #ifdef _KERNEL
2213 /*
2214 * If zio data pages are being allocated out of a separate heap segment,
2215 * then enforce that the free vmem in that arena remains above
2216 * roughly 1/32 of the allocated amount.
2217 */
2218 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2219 vmem_size(zio_arena, VMEM_FREE) <
2220 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2221 return (1);
2222 #endif
2223
2224 if (arc_reclaim_needed())
2225 return (1);
2226
2227 return (arc_size > arc_c);
2228 }
2229
2230 /*
2231 * The buffer, supplied as the first argument, needs a data block.
2232 * So, if we are at cache max, determine which cache should be victimized.
2233 * We have the following cases:
2234 *
2235 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2236 * In this situation if we're out of space, but the resident size of the MFU is
2237 * under the limit, victimize the MFU cache to satisfy this insertion request.
2238 *
2239 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2240 * Here, we've used up all of the available space for the MRU, so we need to
2241 * evict from our own cache instead. Evict from the set of resident MRU
2242 * entries.
2243 *
2244 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2245 * c minus p represents the MFU space in the cache, since p is the size of the
2246 * cache that is dedicated to the MRU. In this situation there's still space on
2247 * the MFU side, so the MRU side needs to be victimized.
2248 *
2249 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2250 * MFU's resident set is consuming more space than it has been allotted. In
2251 * this situation, we must victimize our own cache, the MFU, for this insertion.
2252 */
2253 static void
2254 arc_get_data_buf(arc_buf_t *buf)
2255 {
2256 arc_state_t *state = buf->b_hdr->b_state;
2257 uint64_t size = buf->b_hdr->b_size;
2258 arc_buf_contents_t type = buf->b_hdr->b_type;
2259
2260 arc_adapt(size, state);
2261
2262 /*
2263 * We have not yet reached cache maximum size,
2264 * just allocate a new buffer.
2265 */
2266 if (!arc_evict_needed(type)) {
2267 if (type == ARC_BUFC_METADATA) {
2268 buf->b_data = zio_buf_alloc(size);
2269 arc_space_consume(size, ARC_SPACE_DATA);
2270 } else {
2271 ASSERT(type == ARC_BUFC_DATA);
2272 buf->b_data = zio_data_buf_alloc(size);
2273 ARCSTAT_INCR(arcstat_data_size, size);
2274 atomic_add_64(&arc_size, size);
2275 }
2276 goto out;
2277 }
2278
2279 /*
2280 * If we are prefetching from the mfu ghost list, this buffer
2281 * will end up on the mru list; so steal space from there.
2282 */
2283 if (state == arc_mfu_ghost)
2284 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2285 else if (state == arc_mru_ghost)
2286 state = arc_mru;
2287
2288 if (state == arc_mru || state == arc_anon) {
2289 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2290 state = (arc_mfu->arcs_lsize[type] >= size &&
2291 arc_p > mru_used) ? arc_mfu : arc_mru;
2292 } else {
2293 /* MFU cases */
2294 uint64_t mfu_space = arc_c - arc_p;
2295 state = (arc_mru->arcs_lsize[type] >= size &&
2296 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2297 }
2298 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2299 if (type == ARC_BUFC_METADATA) {
2300 buf->b_data = zio_buf_alloc(size);
2301 arc_space_consume(size, ARC_SPACE_DATA);
2302 } else {
2303 ASSERT(type == ARC_BUFC_DATA);
2304 buf->b_data = zio_data_buf_alloc(size);
2305 ARCSTAT_INCR(arcstat_data_size, size);
2306 atomic_add_64(&arc_size, size);
2307 }
2308 ARCSTAT_BUMP(arcstat_recycle_miss);
2309 }
2310 ASSERT(buf->b_data != NULL);
2311 out:
2312 /*
2313 * Update the state size. Note that ghost states have a
2314 * "ghost size" and so don't need to be updated.
2315 */
2316 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2317 arc_buf_hdr_t *hdr = buf->b_hdr;
2318
2319 atomic_add_64(&hdr->b_state->arcs_size, size);
2320 if (list_link_active(&hdr->b_arc_node)) {
2321 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2322 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2323 }
2324 /*
2325 * If we are growing the cache, and we are adding anonymous
2326 * data, and we have outgrown arc_p, update arc_p
2327 */
2328 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2329 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2330 arc_p = MIN(arc_c, arc_p + size);
2331 }
2332 }
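
/*
 * The victim selection above, summarized (restating the four cases
 * from the block comment before arc_get_data_buf()):
 *
 *	insert for	condition			victim
 *	MRU/anon	arc_p >  anon + mru size	arc_mfu
 *	MRU/anon	arc_p <= anon + mru size	arc_mru
 *	MFU		(arc_c - arc_p) >  mfu size	arc_mru
 *	MFU		(arc_c - arc_p) <= mfu size	arc_mfu
 *
 * subject, in each case, to the preferred victim's evictable list
 * holding at least one block of the requested size; otherwise we
 * evict from our own state.
 */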
2333
2334 /*
2335 * This routine is called whenever a buffer is accessed.
2336 * NOTE: the hash lock is dropped in this function.
2337 */
2338 static void
2339 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2340 {
2341 clock_t now;
2342
2343 ASSERT(MUTEX_HELD(hash_lock));
2344
2345 if (buf->b_state == arc_anon) {
2346 /*
2347 * This buffer is not in the cache, and does not
2348 * appear in our "ghost" list. Add the new buffer
2349 * to the MRU state.
2350 */
2351
2352 ASSERT(buf->b_arc_access == 0);
2353 buf->b_arc_access = ddi_get_lbolt();
2354 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2355 arc_change_state(arc_mru, buf, hash_lock);
2356
2357 } else if (buf->b_state == arc_mru) {
2358 now = ddi_get_lbolt();
2359
2360 /*
2361 * If this buffer is here because of a prefetch, then either:
2362 * - clear the flag if this is a "referencing" read
2363 * (any subsequent access will bump this into the MFU state).
2364 * or
2365 * - move the buffer to the head of the list if this is
2366 * another prefetch (to make it less likely to be evicted).
2367 */
2368 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2369 if (refcount_count(&buf->b_refcnt) == 0) {
2370 ASSERT(list_link_active(&buf->b_arc_node));
2371 } else {
2372 buf->b_flags &= ~ARC_PREFETCH;
2373 ARCSTAT_BUMP(arcstat_mru_hits);
2374 }
2375 buf->b_arc_access = now;
2376 return;
2377 }
2378
2379 /*
2380 * This buffer has been "accessed" only once so far,
2381 * but it is still in the cache. Move it to the MFU
2382 * state.
2383 */
2384 if (now > buf->b_arc_access + ARC_MINTIME) {
2385 /*
2386 * More than 125ms have passed since we
2387 * instantiated this buffer. Move it to the
2388 * most frequently used state.
2389 */
2390 buf->b_arc_access = now;
2391 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2392 arc_change_state(arc_mfu, buf, hash_lock);
2393 }
2394 ARCSTAT_BUMP(arcstat_mru_hits);
2395 } else if (buf->b_state == arc_mru_ghost) {
2396 arc_state_t *new_state;
2397 /*
2398 * This buffer has been "accessed" recently, but
2399 * was evicted from the cache. Move it to the
2400 * MFU state.
2401 */
2402
2403 if (buf->b_flags & ARC_PREFETCH) {
2404 new_state = arc_mru;
2405 if (refcount_count(&buf->b_refcnt) > 0)
2406 buf->b_flags &= ~ARC_PREFETCH;
2407 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2408 } else {
2409 new_state = arc_mfu;
2410 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2411 }
2412
2413 buf->b_arc_access = ddi_get_lbolt();
2414 arc_change_state(new_state, buf, hash_lock);
2415
2416 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2417 } else if (buf->b_state == arc_mfu) {
2418 /*
2419 * This buffer has been accessed more than once and is
2420 * still in the cache. Keep it in the MFU state.
2421 *
2422 * NOTE: an add_reference() that occurred when we did
2423 * the arc_read() will have kicked this off the list.
2424 * If it was a prefetch, we will explicitly move it to
2425 * the head of the list now.
2426 */
2427 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2428 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2429 ASSERT(list_link_active(&buf->b_arc_node));
2430 }
2431 ARCSTAT_BUMP(arcstat_mfu_hits);
2432 buf->b_arc_access = ddi_get_lbolt();
2433 } else if (buf->b_state == arc_mfu_ghost) {
2434 arc_state_t *new_state = arc_mfu;
2435 /*
2436 * This buffer has been accessed more than once but has
2437 * been evicted from the cache. Move it back to the
2438 * MFU state.
2439 */
2440
2441 if (buf->b_flags & ARC_PREFETCH) {
2442 /*
2443 * This is a prefetch access...
2444 * move this block back to the MRU state.
2445 */
2446 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2447 new_state = arc_mru;
2448 }
2449
2450 buf->b_arc_access = ddi_get_lbolt();
2451 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2452 arc_change_state(new_state, buf, hash_lock);
2453
2454 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2455 } else if (buf->b_state == arc_l2c_only) {
2456 /*
2457 * This buffer is on the 2nd Level ARC.
2458 */
2459
2460 buf->b_arc_access = ddi_get_lbolt();
2461 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2462 arc_change_state(arc_mfu, buf, hash_lock);
2463 } else {
2464 ASSERT(!"invalid arc state");
2465 }
2466 }
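
/*
 * For reference, the transitions arc_access() drives, derived from the
 * code above (entry state -> exit state):
 *
 *	anon      -> mru	first insertion into the cache
 *	mru       -> mru	prefetch, or re-access within ARC_MINTIME
 *	mru       -> mfu	re-access after ARC_MINTIME
 *	mru ghost -> mfu	(-> mru instead if the access is a prefetch)
 *	mfu       -> mfu	stays; access time is refreshed
 *	mfu ghost -> mfu	(-> mru instead if the access is a prefetch)
 *	l2c_only  -> mfu	block is also resident on an L2ARC device
 */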
2467
2468 /* a generic arc_done_func_t which you can use */
2469 /* ARGSUSED */
2470 void
2471 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2472 {
2473 if (zio == NULL || zio->io_error == 0)
2474 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2475 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2476 }
2477
2478 /* a generic arc_done_func_t */
2479 void
2480 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2481 {
2482 arc_buf_t **bufp = arg;
2483 if (zio && zio->io_error) {
2484 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2485 *bufp = NULL;
2486 } else {
2487 *bufp = buf;
2488 ASSERT(buf->b_data);
2489 }
2490 }
2491
2492 static void
2493 arc_read_done(zio_t *zio)
2494 {
2495 arc_buf_hdr_t *hdr, *found;
2496 arc_buf_t *buf;
2497 arc_buf_t *abuf; /* buffer we're assigning to callback */
2498 kmutex_t *hash_lock;
2499 arc_callback_t *callback_list, *acb;
2500 int freeable = FALSE;
2501
2502 buf = zio->io_private;
2503 hdr = buf->b_hdr;
2504
2505 /*
2506 * The hdr was inserted into hash-table and removed from lists
2507 * prior to starting I/O. We should find this header, since
2508 * it's in the hash table, and it should be legit since it's
2509 * not possible to evict it during the I/O. The only possible
2510 * reason for it not to be found is if we were freed during the
2511 * read.
2512 */
2513 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2514 &hash_lock);
2515
2516 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2517 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2518 (found == hdr && HDR_L2_READING(hdr)));
2519
2520 hdr->b_flags &= ~ARC_L2_EVICTED;
2521 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2522 hdr->b_flags &= ~ARC_L2CACHE;
2523
2524 /* byteswap if necessary */
2525 callback_list = hdr->b_acb;
2526 ASSERT(callback_list != NULL);
2527 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2528 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2529 byteswap_uint64_array :
2530 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2531 func(buf->b_data, hdr->b_size);
2532 }
2533
2534 arc_cksum_compute(buf, B_FALSE);
2535
2536 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2537 /*
2538 * Only call arc_access on anonymous buffers. This is because
2539 * if we've issued an I/O for an evicted buffer, we've already
2540 * called arc_access (to prevent any simultaneous readers from
2541 * getting confused).
2542 */
2543 arc_access(hdr, hash_lock);
2544 }
2545
2546 /* create copies of the data buffer for the callers */
2547 abuf = buf;
2548 for (acb = callback_list; acb; acb = acb->acb_next) {
2549 if (acb->acb_done) {
2550 if (abuf == NULL)
2551 abuf = arc_buf_clone(buf);
2552 acb->acb_buf = abuf;
2553 abuf = NULL;
2554 }
2555 }
2556 hdr->b_acb = NULL;
2557 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2558 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2559 if (abuf == buf) {
2560 ASSERT(buf->b_efunc == NULL);
2561 ASSERT(hdr->b_datacnt == 1);
2562 hdr->b_flags |= ARC_BUF_AVAILABLE;
2563 }
2564
2565 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2566
2567 if (zio->io_error != 0) {
2568 hdr->b_flags |= ARC_IO_ERROR;
2569 if (hdr->b_state != arc_anon)
2570 arc_change_state(arc_anon, hdr, hash_lock);
2571 if (HDR_IN_HASH_TABLE(hdr))
2572 buf_hash_remove(hdr);
2573 freeable = refcount_is_zero(&hdr->b_refcnt);
2574 }
2575
2576 /*
2577 * Broadcast before we drop the hash_lock to avoid the possibility
2578 * that the hdr (and hence the cv) might be freed before we get to
2579 * the cv_broadcast().
2580 */
2581 cv_broadcast(&hdr->b_cv);
2582
2583 if (hash_lock) {
2584 mutex_exit(hash_lock);
2585 } else {
2586 /*
2587 * This block was freed while we waited for the read to
2588 * complete. It has been removed from the hash table and
2589 * moved to the anonymous state (so that it won't show up
2590 * in the cache).
2591 */
2592 ASSERT3P(hdr->b_state, ==, arc_anon);
2593 freeable = refcount_is_zero(&hdr->b_refcnt);
2594 }
2595
2596 /* execute each callback and free its structure */
2597 while ((acb = callback_list) != NULL) {
2598 if (acb->acb_done)
2599 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2600
2601 if (acb->acb_zio_dummy != NULL) {
2602 acb->acb_zio_dummy->io_error = zio->io_error;
2603 zio_nowait(acb->acb_zio_dummy);
2604 }
2605
2606 callback_list = acb->acb_next;
2607 kmem_free(acb, sizeof (arc_callback_t));
2608 }
2609
2610 if (freeable)
2611 arc_hdr_destroy(hdr);
2612 }
2613
2614 /*
2615 * "Read" the block block at the specified DVA (in bp) via the
2616 * cache. If the block is found in the cache, invoke the provided
2617 * callback immediately and return. Note that the `zio' parameter
2618 * in the callback will be NULL in this case, since no IO was
2619 * required. If the block is not in the cache pass the read request
2620 * on to the spa with a substitute callback function, so that the
2621 * requested block will be added to the cache.
2622 *
2623 * If a read request arrives for a block that has a read in-progress,
2624 * either wait for the in-progress read to complete (and return the
2625 * results); or, if this is a read with a "done" func, add a record
2626 * to the read to invoke the "done" func when the read completes,
2627 * and return; or just return.
2628 *
2629 * arc_read_done() will invoke all the requested "done" functions
2630 * for readers of this block.
2631 *
2632 * Normal callers should use arc_read and pass the arc buffer and offset
2633 * for the bp. But if you know you don't need locking, you can use
2634 * arc_read_nolock.
2635 */
2636 int
2637 arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_buf_t *pbuf,
2638 arc_done_func_t *done, void *private, int priority, int zio_flags,
2639 uint32_t *arc_flags, const zbookmark_t *zb)
2640 {
2641 int err;
2642
2643 if (pbuf == NULL) {
2644 /*
2645 * XXX This happens from traverse callback funcs, for
2646 * the objset_phys_t block.
2647 */
2648 return (arc_read_nolock(pio, spa, bp, done, private, priority,
2649 zio_flags, arc_flags, zb));
2650 }
2651
2652 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2653 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2654 rw_enter(&pbuf->b_data_lock, RW_READER);
2655
2656 err = arc_read_nolock(pio, spa, bp, done, private, priority,
2657 zio_flags, arc_flags, zb);
2658 rw_exit(&pbuf->b_data_lock);
2659
2660 return (err);
2661 }
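
/*
 * A usage sketch (illustrative only; the priority and flags shown are
 * examples from the zio layer): a synchronous cached read that hands
 * the result back through the generic arc_getbuf_func() callback
 * above:
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (abuf != NULL) {
 *		... consume abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 *
 * pbuf is the parent arc buffer containing bp, as described above.
 */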
2662
2663 int
2664 arc_read_nolock(zio_t *pio, spa_t *spa, const blkptr_t *bp,
2665 arc_done_func_t *done, void *private, int priority, int zio_flags,
2666 uint32_t *arc_flags, const zbookmark_t *zb)
2667 {
2668 arc_buf_hdr_t *hdr;
2669 arc_buf_t *buf;
2670 kmutex_t *hash_lock;
2671 zio_t *rzio;
2672 uint64_t guid = spa_guid(spa);
2673
2674 top:
2675 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
2676 &hash_lock);
2677 if (hdr && hdr->b_datacnt > 0) {
2678
2679 *arc_flags |= ARC_CACHED;
2680
2681 if (HDR_IO_IN_PROGRESS(hdr)) {
2682
2683 if (*arc_flags & ARC_WAIT) {
2684 cv_wait(&hdr->b_cv, hash_lock);
2685 mutex_exit(hash_lock);
2686 goto top;
2687 }
2688 ASSERT(*arc_flags & ARC_NOWAIT);
2689
2690 if (done) {
2691 arc_callback_t *acb = NULL;
2692
2693 acb = kmem_zalloc(sizeof (arc_callback_t),
2694 KM_SLEEP);
2695 acb->acb_done = done;
2696 acb->acb_private = private;
2697 if (pio != NULL)
2698 acb->acb_zio_dummy = zio_null(pio,
2699 spa, NULL, NULL, NULL, zio_flags);
2700
2701 ASSERT(acb->acb_done != NULL);
2702 acb->acb_next = hdr->b_acb;
2703 hdr->b_acb = acb;
2704 add_reference(hdr, hash_lock, private);
2705 mutex_exit(hash_lock);
2706 return (0);
2707 }
2708 mutex_exit(hash_lock);
2709 return (0);
2710 }
2711
2712 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2713
2714 if (done) {
2715 add_reference(hdr, hash_lock, private);
2716 /*
2717 * If this block is already in use, create a new
2718 * copy of the data so that we will be guaranteed
2719 * that arc_release() will always succeed.
2720 */
2721 buf = hdr->b_buf;
2722 ASSERT(buf);
2723 ASSERT(buf->b_data);
2724 if (HDR_BUF_AVAILABLE(hdr)) {
2725 ASSERT(buf->b_efunc == NULL);
2726 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2727 } else {
2728 buf = arc_buf_clone(buf);
2729 }
2730
2731 } else if (*arc_flags & ARC_PREFETCH &&
2732 refcount_count(&hdr->b_refcnt) == 0) {
2733 hdr->b_flags |= ARC_PREFETCH;
2734 }
2735 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2736 arc_access(hdr, hash_lock);
2737 if (*arc_flags & ARC_L2CACHE)
2738 hdr->b_flags |= ARC_L2CACHE;
2739 mutex_exit(hash_lock);
2740 ARCSTAT_BUMP(arcstat_hits);
2741 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2742 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2743 data, metadata, hits);
2744
2745 if (done)
2746 done(NULL, buf, private);
2747 } else {
2748 uint64_t size = BP_GET_LSIZE(bp);
2749 arc_callback_t *acb;
2750 vdev_t *vd = NULL;
2751 uint64_t addr;
2752 boolean_t devw = B_FALSE;
2753
2754 if (hdr == NULL) {
2755 /* this block is not in the cache */
2756 arc_buf_hdr_t *exists;
2757 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2758 buf = arc_buf_alloc(spa, size, private, type);
2759 hdr = buf->b_hdr;
2760 hdr->b_dva = *BP_IDENTITY(bp);
2761 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
2762 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2763 exists = buf_hash_insert(hdr, &hash_lock);
2764 if (exists) {
2765 /* somebody beat us to the hash insert */
2766 mutex_exit(hash_lock);
2767 buf_discard_identity(hdr);
2768 (void) arc_buf_remove_ref(buf, private);
2769 goto top; /* restart the IO request */
2770 }
2771 /* if this is a prefetch, we don't have a reference */
2772 if (*arc_flags & ARC_PREFETCH) {
2773 (void) remove_reference(hdr, hash_lock,
2774 private);
2775 hdr->b_flags |= ARC_PREFETCH;
2776 }
2777 if (*arc_flags & ARC_L2CACHE)
2778 hdr->b_flags |= ARC_L2CACHE;
2779 if (BP_GET_LEVEL(bp) > 0)
2780 hdr->b_flags |= ARC_INDIRECT;
2781 } else {
2782 /* this block is in the ghost cache */
2783 ASSERT(GHOST_STATE(hdr->b_state));
2784 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2785 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2786 ASSERT(hdr->b_buf == NULL);
2787
2788 /* if this is a prefetch, we don't have a reference */
2789 if (*arc_flags & ARC_PREFETCH)
2790 hdr->b_flags |= ARC_PREFETCH;
2791 else
2792 add_reference(hdr, hash_lock, private);
2793 if (*arc_flags & ARC_L2CACHE)
2794 hdr->b_flags |= ARC_L2CACHE;
2795 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2796 buf->b_hdr = hdr;
2797 buf->b_data = NULL;
2798 buf->b_efunc = NULL;
2799 buf->b_private = NULL;
2800 buf->b_next = NULL;
2801 hdr->b_buf = buf;
2802 ASSERT(hdr->b_datacnt == 0);
2803 hdr->b_datacnt = 1;
2804 arc_get_data_buf(buf);
2805 arc_access(hdr, hash_lock);
2806 }
2807
2808 ASSERT(!GHOST_STATE(hdr->b_state));
2809
2810 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2811 acb->acb_done = done;
2812 acb->acb_private = private;
2813
2814 ASSERT(hdr->b_acb == NULL);
2815 hdr->b_acb = acb;
2816 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2817
2818 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2819 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2820 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
2821 addr = hdr->b_l2hdr->b_daddr;
2822 /*
2823 * Lock out device removal.
2824 */
2825 if (vdev_is_dead(vd) ||
2826 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2827 vd = NULL;
2828 }
2829
2830 mutex_exit(hash_lock);
2831
2832 ASSERT3U(hdr->b_size, ==, size);
2833 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
2834 uint64_t, size, zbookmark_t *, zb);
2835 ARCSTAT_BUMP(arcstat_misses);
2836 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2837 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2838 data, metadata, misses);
2839
2840 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
2841 /*
2842 * Read from the L2ARC if the following are true:
2843 * 1. The L2ARC vdev was previously cached.
2844 * 2. This buffer still has L2ARC metadata.
2845 * 3. This buffer isn't currently writing to the L2ARC.
2846 * 4. The L2ARC entry wasn't evicted, which may
2847 * also have invalidated the vdev.
2848 * 5. This isn't a prefetch while l2arc_noprefetch is set.
2849 */
2850 if (hdr->b_l2hdr != NULL &&
2851 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
2852 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
2853 l2arc_read_callback_t *cb;
2854
2855 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2856 ARCSTAT_BUMP(arcstat_l2_hits);
2857
2858 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2859 KM_SLEEP);
2860 cb->l2rcb_buf = buf;
2861 cb->l2rcb_spa = spa;
2862 cb->l2rcb_bp = *bp;
2863 cb->l2rcb_zb = *zb;
2864 cb->l2rcb_flags = zio_flags;
2865
2866 /*
2867 * l2arc read. The SCL_L2ARC lock will be
2868 * released by l2arc_read_done().
2869 */
2870 rzio = zio_read_phys(pio, vd, addr, size,
2871 buf->b_data, ZIO_CHECKSUM_OFF,
2872 l2arc_read_done, cb, priority, zio_flags |
2873 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2874 ZIO_FLAG_DONT_PROPAGATE |
2875 ZIO_FLAG_DONT_RETRY, B_FALSE);
2876 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2877 zio_t *, rzio);
2878 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
2879
2880 if (*arc_flags & ARC_NOWAIT) {
2881 zio_nowait(rzio);
2882 return (0);
2883 }
2884
2885 ASSERT(*arc_flags & ARC_WAIT);
2886 if (zio_wait(rzio) == 0)
2887 return (0);
2888
2889 /* l2arc read error; goto zio_read() */
2890 } else {
2891 DTRACE_PROBE1(l2arc__miss,
2892 arc_buf_hdr_t *, hdr);
2893 ARCSTAT_BUMP(arcstat_l2_misses);
2894 if (HDR_L2_WRITING(hdr))
2895 ARCSTAT_BUMP(arcstat_l2_rw_clash);
2896 spa_config_exit(spa, SCL_L2ARC, vd);
2897 }
2898 } else {
2899 if (vd != NULL)
2900 spa_config_exit(spa, SCL_L2ARC, vd);
2901 if (l2arc_ndev != 0) {
2902 DTRACE_PROBE1(l2arc__miss,
2903 arc_buf_hdr_t *, hdr);
2904 ARCSTAT_BUMP(arcstat_l2_misses);
2905 }
2906 }
2907
2908 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2909 arc_read_done, buf, priority, zio_flags, zb);
2910
2911 if (*arc_flags & ARC_WAIT)
2912 return (zio_wait(rzio));
2913
2914 ASSERT(*arc_flags & ARC_NOWAIT);
2915 zio_nowait(rzio);
2916 }
2917 return (0);
2918 }
2919
2920 void
2921 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2922 {
2923 ASSERT(buf->b_hdr != NULL);
2924 ASSERT(buf->b_hdr->b_state != arc_anon);
2925 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2926 ASSERT(buf->b_efunc == NULL);
2927 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
2928
2929 buf->b_efunc = func;
2930 buf->b_private = private;
2931 }
2932
2933 /*
2934 * This is used by the DMU to let the ARC know that a buffer is
2935 * being evicted, so the ARC should clean up. If this arc buf
2936 * is not yet in the evicted state, it will be put there.
2937 */
2938 int
2939 arc_buf_evict(arc_buf_t *buf)
2940 {
2941 arc_buf_hdr_t *hdr;
2942 kmutex_t *hash_lock;
2943 arc_buf_t **bufp;
2944
2945 mutex_enter(&buf->b_evict_lock);
2946 hdr = buf->b_hdr;
2947 if (hdr == NULL) {
2948 /*
2949 * We are in arc_do_user_evicts().
2950 */
2951 ASSERT(buf->b_data == NULL);
2952 mutex_exit(&buf->b_evict_lock);
2953 return (0);
2954 } else if (buf->b_data == NULL) {
2955 arc_buf_t copy = *buf; /* structure assignment */
2956 /*
2957 * We are on the eviction list; process this buffer now
2958 * but let arc_do_user_evicts() do the reaping.
2959 */
2960 buf->b_efunc = NULL;
2961 mutex_exit(&buf->b_evict_lock);
2962 VERIFY(copy.b_efunc(&copy) == 0);
2963 return (1);
2964 }
2965 hash_lock = HDR_LOCK(hdr);
2966 mutex_enter(hash_lock);
2967 hdr = buf->b_hdr;
2968 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2969
2970 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2971 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2972
2973 /*
2974 * Pull this buffer off of the hdr
2975 */
2976 bufp = &hdr->b_buf;
2977 while (*bufp != buf)
2978 bufp = &(*bufp)->b_next;
2979 *bufp = buf->b_next;
2980
2981 ASSERT(buf->b_data != NULL);
2982 arc_buf_destroy(buf, FALSE, FALSE);
2983
2984 if (hdr->b_datacnt == 0) {
2985 arc_state_t *old_state = hdr->b_state;
2986 arc_state_t *evicted_state;
2987
2988 ASSERT(hdr->b_buf == NULL);
2989 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2990
2991 evicted_state =
2992 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2993
2994 mutex_enter(&old_state->arcs_mtx);
2995 mutex_enter(&evicted_state->arcs_mtx);
2996
2997 arc_change_state(evicted_state, hdr, hash_lock);
2998 ASSERT(HDR_IN_HASH_TABLE(hdr));
2999 hdr->b_flags |= ARC_IN_HASH_TABLE;
3000 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3001
3002 mutex_exit(&evicted_state->arcs_mtx);
3003 mutex_exit(&old_state->arcs_mtx);
3004 }
3005 mutex_exit(hash_lock);
3006 mutex_exit(&buf->b_evict_lock);
3007
3008 VERIFY(buf->b_efunc(buf) == 0);
3009 buf->b_efunc = NULL;
3010 buf->b_private = NULL;
3011 buf->b_hdr = NULL;
3012 buf->b_next = NULL;
3013 kmem_cache_free(buf_cache, buf);
3014 return (1);
3015 }
3016
3017 /*
3018 * Release this buffer from the cache. This must be done
3019 * after a read and prior to modifying the buffer contents.
3020 * If the buffer has more than one reference, we must make
3021 * a new hdr for the buffer.
3022 */
3023 void
3024 arc_release(arc_buf_t *buf, void *tag)
3025 {
3026 arc_buf_hdr_t *hdr;
3027 kmutex_t *hash_lock = NULL;
3028 l2arc_buf_hdr_t *l2hdr;
3029 uint64_t buf_size;
3030
3031 /*
3032 * It would be nice to assert that if it's DMU metadata (level >
3033 * 0 || it's the dnode file), then it must be syncing context.
3034 * But we don't know that information at this level.
3035 */
3036
3037 mutex_enter(&buf->b_evict_lock);
3038 hdr = buf->b_hdr;
3039
3040 /* this buffer is not on any list */
3041 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3042
3043 if (hdr->b_state == arc_anon) {
3044 /* this buffer is already released */
3045 ASSERT(buf->b_efunc == NULL);
3046 } else {
3047 hash_lock = HDR_LOCK(hdr);
3048 mutex_enter(hash_lock);
3049 hdr = buf->b_hdr;
3050 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3051 }
3052
3053 l2hdr = hdr->b_l2hdr;
3054 if (l2hdr) {
3055 mutex_enter(&l2arc_buflist_mtx);
3056 hdr->b_l2hdr = NULL;
3057 buf_size = hdr->b_size;
3058 }
3059
3060 /*
3061 * Do we have more than one buf?
3062 */
3063 if (hdr->b_datacnt > 1) {
3064 arc_buf_hdr_t *nhdr;
3065 arc_buf_t **bufp;
3066 uint64_t blksz = hdr->b_size;
3067 uint64_t spa = hdr->b_spa;
3068 arc_buf_contents_t type = hdr->b_type;
3069 uint32_t flags = hdr->b_flags;
3070
3071 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3072 /*
3073 * Pull the data off of this hdr and attach it to
3074 * a new anonymous hdr.
3075 */
3076 (void) remove_reference(hdr, hash_lock, tag);
3077 bufp = &hdr->b_buf;
3078 while (*bufp != buf)
3079 bufp = &(*bufp)->b_next;
3080 *bufp = buf->b_next;
3081 buf->b_next = NULL;
3082
3083 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3084 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3085 if (refcount_is_zero(&hdr->b_refcnt)) {
3086 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3087 ASSERT3U(*size, >=, hdr->b_size);
3088 atomic_add_64(size, -hdr->b_size);
3089 }
3090 hdr->b_datacnt -= 1;
3091 arc_cksum_verify(buf);
3092
3093 mutex_exit(hash_lock);
3094
3095 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3096 nhdr->b_size = blksz;
3097 nhdr->b_spa = spa;
3098 nhdr->b_type = type;
3099 nhdr->b_buf = buf;
3100 nhdr->b_state = arc_anon;
3101 nhdr->b_arc_access = 0;
3102 nhdr->b_flags = flags & ARC_L2_WRITING;
3103 nhdr->b_l2hdr = NULL;
3104 nhdr->b_datacnt = 1;
3105 nhdr->b_freeze_cksum = NULL;
3106 (void) refcount_add(&nhdr->b_refcnt, tag);
3107 buf->b_hdr = nhdr;
3108 mutex_exit(&buf->b_evict_lock);
3109 atomic_add_64(&arc_anon->arcs_size, blksz);
3110 } else {
3111 mutex_exit(&buf->b_evict_lock);
3112 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3113 ASSERT(!list_link_active(&hdr->b_arc_node));
3114 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3115 if (hdr->b_state != arc_anon)
3116 arc_change_state(arc_anon, hdr, hash_lock);
3117 hdr->b_arc_access = 0;
3118 if (hash_lock)
3119 mutex_exit(hash_lock);
3120
3121 buf_discard_identity(hdr);
3122 arc_buf_thaw(buf);
3123 }
3124 buf->b_efunc = NULL;
3125 buf->b_private = NULL;
3126
3127 if (l2hdr) {
3128 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3129 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3130 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3131 mutex_exit(&l2arc_buflist_mtx);
3132 }
3133 }
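
/*
 * Usage sketch (illustrative): the read-modify-write pattern this
 * function exists for.  A buffer obtained from arc_read() may be
 * shared; arc_release() detaches it onto a private anonymous hdr so
 * that modifying it cannot perturb other cached readers:
 *
 *	arc_release(buf, tag);
 *	bcopy(new_data, buf->b_data, buf->b_hdr->b_size);
 *	... the buffer is later written out via arc_write() ...
 *
 * Afterwards arc_released() returns nonzero and the buffer is no
 * longer discoverable through the hash table.
 */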
3134
3135 /*
3136 * Release this buffer. If it does not match the provided BP, fill it
3137 * with that block's contents.
3138 */
3139 /* ARGSUSED */
3140 int
3141 arc_release_bp(arc_buf_t *buf, void *tag, blkptr_t *bp, spa_t *spa,
3142 zbookmark_t *zb)
3143 {
3144 arc_release(buf, tag);
3145 return (0);
3146 }
3147
3148 int
3149 arc_released(arc_buf_t *buf)
3150 {
3151 int released;
3152
3153 mutex_enter(&buf->b_evict_lock);
3154 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3155 mutex_exit(&buf->b_evict_lock);
3156 return (released);
3157 }
3158
3159 int
3160 arc_has_callback(arc_buf_t *buf)
3161 {
3162 int callback;
3163
3164 mutex_enter(&buf->b_evict_lock);
3165 callback = (buf->b_efunc != NULL);
3166 mutex_exit(&buf->b_evict_lock);
3167 return (callback);
3168 }
3169
3170 #ifdef ZFS_DEBUG
3171 int
3172 arc_referenced(arc_buf_t *buf)
3173 {
3174 int referenced;
3175
3176 mutex_enter(&buf->b_evict_lock);
3177 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3178 mutex_exit(&buf->b_evict_lock);
3179 return (referenced);
3180 }
3181 #endif
3182
3183 static void
3184 arc_write_ready(zio_t *zio)
3185 {
3186 arc_write_callback_t *callback = zio->io_private;
3187 arc_buf_t *buf = callback->awcb_buf;
3188 arc_buf_hdr_t *hdr = buf->b_hdr;
3189
3190 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3191 callback->awcb_ready(zio, buf, callback->awcb_private);
3192
3193 /*
3194 * If the IO is already in progress, then this is a re-write
3195 * attempt, so we need to thaw and re-compute the cksum.
3196 * It is the responsibility of the callback to handle the
3197 * accounting for any re-write attempt.
3198 */
3199 if (HDR_IO_IN_PROGRESS(hdr)) {
3200 mutex_enter(&hdr->b_freeze_lock);
3201 if (hdr->b_freeze_cksum != NULL) {
3202 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3203 hdr->b_freeze_cksum = NULL;
3204 }
3205 mutex_exit(&hdr->b_freeze_lock);
3206 }
3207 arc_cksum_compute(buf, B_FALSE);
3208 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3209 }
3210
3211 static void
3212 arc_write_done(zio_t *zio)
3213 {
3214 arc_write_callback_t *callback = zio->io_private;
3215 arc_buf_t *buf = callback->awcb_buf;
3216 arc_buf_hdr_t *hdr = buf->b_hdr;
3217
3218 ASSERT(hdr->b_acb == NULL);
3219
3220 if (zio->io_error == 0) {
3221 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3222 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3223 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3224 } else {
3225 ASSERT(BUF_EMPTY(hdr));
3226 }
3227
3228 /*
3229 * If the block to be written was all-zero, we may have
3230 * compressed it away. In this case no write was performed
3231 * so there will be no dva/birth/checksum. The buffer must
3232 * therefore remain anonymous (and uncached).
3233 */
3234 if (!BUF_EMPTY(hdr)) {
3235 arc_buf_hdr_t *exists;
3236 kmutex_t *hash_lock;
3237
3238 ASSERT(zio->io_error == 0);
3239
3240 arc_cksum_verify(buf);
3241
3242 exists = buf_hash_insert(hdr, &hash_lock);
3243 if (exists) {
3244 /*
3245 * This can only happen if we overwrite for
3246 * sync-to-convergence, because we remove
3247 * buffers from the hash table when we arc_free().
3248 */
3249 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3250 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3251 panic("bad overwrite, hdr=%p exists=%p",
3252 (void *)hdr, (void *)exists);
3253 ASSERT(refcount_is_zero(&exists->b_refcnt));
3254 arc_change_state(arc_anon, exists, hash_lock);
3255 mutex_exit(hash_lock);
3256 arc_hdr_destroy(exists);
3257 exists = buf_hash_insert(hdr, &hash_lock);
3258 ASSERT3P(exists, ==, NULL);
3259 } else {
3260 /* Dedup */
3261 ASSERT(hdr->b_datacnt == 1);
3262 ASSERT(hdr->b_state == arc_anon);
3263 ASSERT(BP_GET_DEDUP(zio->io_bp));
3264 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3265 }
3266 }
3267 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3268 /* if it's not anon, we are doing a scrub */
3269 if (!exists && hdr->b_state == arc_anon)
3270 arc_access(hdr, hash_lock);
3271 mutex_exit(hash_lock);
3272 } else {
3273 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3274 }
3275
3276 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3277 callback->awcb_done(zio, buf, callback->awcb_private);
3278
3279 kmem_free(callback, sizeof (arc_write_callback_t));
3280 }
3281
3282 zio_t *
3283 arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3284 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3285 arc_done_func_t *ready, arc_done_func_t *done, void *private,
3286 int priority, int zio_flags, const zbookmark_t *zb)
3287 {
3288 arc_buf_hdr_t *hdr = buf->b_hdr;
3289 arc_write_callback_t *callback;
3290 zio_t *zio;
3291
3292 ASSERT(ready != NULL);
3293 ASSERT(done != NULL);
3294 ASSERT(!HDR_IO_ERROR(hdr));
3295 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3296 ASSERT(hdr->b_acb == NULL);
3297 if (l2arc)
3298 hdr->b_flags |= ARC_L2CACHE;
3299 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3300 callback->awcb_ready = ready;
3301 callback->awcb_done = done;
3302 callback->awcb_private = private;
3303 callback->awcb_buf = buf;
3304
3305 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3306 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3307
3308 return (zio);
3309 }
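
/*
 * Usage sketch (illustrative; the priority and flags shown are
 * examples from the zio layer): arc_write() only constructs the zio,
 * so the caller still issues it:
 *
 *	zio = arc_write(pio, spa, txg, bp, buf, l2arc, zp,
 *	    ready_cb, done_cb, cb_arg, ZIO_PRIORITY_ASYNC_WRITE,
 *	    ZIO_FLAG_MUSTSUCCEED, zb);
 *	zio_nowait(zio);	(or zio_wait(zio) for sync writers)
 *
 * ready_cb fires from arc_write_ready() once the data is frozen for
 * writing; done_cb fires from arc_write_done() after I/O completion.
 */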
3310
3311 static int
3312 arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3313 {
3314 #ifdef _KERNEL
3315 uint64_t available_memory = ptob(freemem);
3316 static uint64_t page_load = 0;
3317 static uint64_t last_txg = 0;
3318
3319 #if defined(__i386)
3320 available_memory =
3321 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3322 #endif
3323 if (available_memory >= zfs_write_limit_max)
3324 return (0);
3325
3326 if (txg > last_txg) {
3327 last_txg = txg;
3328 page_load = 0;
3329 }
3330 /*
3331 * If we are in pageout, we know that memory is already tight;
3332 * the ARC is already going to be evicting, so we just want to
3333 * continue to let page writes occur as quickly as possible.
3334 */
3335 if (curproc == proc_pageout) {
3336 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3337 return (ERESTART);
3338 /* Note: reserve is inflated, so we deflate */
3339 page_load += reserve / 8;
3340 return (0);
3341 } else if (page_load > 0 && arc_reclaim_needed()) {
3342 /* memory is low, delay before restarting */
3343 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3344 return (EAGAIN);
3345 }
3346 page_load = 0;
3347
3348 if (arc_size > arc_c_min) {
3349 uint64_t evictable_memory =
3350 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3351 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3352 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3353 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3354 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3355 }
3356
3357 if (inflight_data > available_memory / 4) {
3358 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3359 return (ERESTART);
3360 }
3361 #endif
3362 return (0);
3363 }
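
/*
 * A worked example of the throttle above (illustrative numbers): with
 * 100 MB of free memory (and zfs_write_limit_max above that, so the
 * early return is not taken), a writer carrying inflight_data = 30 MB
 * trips the final check,
 *
 *	inflight_data > available_memory / 4	(30 MB > 25 MB)
 *
 * and gets ERESTART, unless enough evictable ARC data above arc_c_min
 * was first credited to available_memory.
 */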
3364
3365 void
3366 arc_tempreserve_clear(uint64_t reserve)
3367 {
3368 atomic_add_64(&arc_tempreserve, -reserve);
3369 ASSERT((int64_t)arc_tempreserve >= 0);
3370 }
3371
3372 int
3373 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3374 {
3375 int error;
3376 uint64_t anon_size;
3377
3378 #ifdef ZFS_DEBUG
3379 /*
3380 * Once in a while, fail for no reason. Everything should cope.
3381 */
3382 if (spa_get_random(10000) == 0) {
3383 dprintf("forcing random failure\n");
3384 return (ERESTART);
3385 }
3386 #endif
3387 if (reserve > arc_c/4 && !arc_no_grow)
3388 arc_c = MIN(arc_c_max, reserve * 4);
3389 if (reserve > arc_c)
3390 return (ENOMEM);
3391
3392 /*
3393 * Don't count loaned bufs as in-flight dirty data to prevent long
3394 * network delays from blocking transactions that are ready to be
3395 * assigned to a txg.
3396 */
3397 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3398
3399 /*
3400 * Writes will, almost always, require additional memory allocations
3401 * in order to compress/encrypt/etc the data. We therefore need to
3402 * make sure that there is sufficient available memory for this.
3403 */
3404 if (error = arc_memory_throttle(reserve, anon_size, txg))
3405 return (error);
3406
3407 /*
3408 * Throttle writes when the amount of dirty data in the cache
3409 * gets too large. We try to keep the cache less than half full
3410 * of dirty blocks so that our sync times don't grow too large.
3411 * Note: if two requests come in concurrently, we might let them
3412 * both succeed, when one of them should fail. Not a huge deal.
3413 */
3414
3415 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3416 anon_size > arc_c / 4) {
3417 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3418 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3419 arc_tempreserve>>10,
3420 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3421 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3422 reserve>>10, arc_c>>10);
3423 return (ERESTART);
3424 }
3425 atomic_add_64(&arc_tempreserve, reserve);
3426 return (0);
3427 }
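
/*
 * Usage sketch (illustrative): a reservation is held while dirty data
 * is generated and must be returned by the caller:
 *
 *	if ((error = arc_tempreserve_space(nbytes, txg)) != 0)
 *		return (error);	ERESTART/EAGAIN/ENOMEM when throttled
 *	... dirty the data ...
 *	arc_tempreserve_clear(nbytes);
 *
 * The DSL transaction code brackets dirtying this way; it is shown
 * here only as the expected calling pattern.
 */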
3428
3429 void
3430 arc_init(void)
3431 {
3432 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3433 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3434
3435 /* Convert seconds to clock ticks */
3436 arc_min_prefetch_lifespan = 1 * hz;
3437
3438 /* Start out with 1/8 of all memory */
3439 arc_c = physmem * PAGESIZE / 8;
3440
3441 #ifdef _KERNEL
3442 /*
3443 * On architectures where the physical memory can be larger
3444 * than the addressable space (Intel in 32-bit mode), we may
3445 * need to limit the cache to 1/8 of VM size.
3446 */
3447 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3448 #endif
3449
3450 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3451 arc_c_min = MAX(arc_c / 4, 64<<20);
3452 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3453 if (arc_c * 8 >= 1<<30)
3454 arc_c_max = (arc_c * 8) - (1<<30);
3455 else
3456 arc_c_max = arc_c_min;
3457 arc_c_max = MAX(arc_c * 6, arc_c_max);
3458
3459 /*
3460 * Allow the tunables to override our calculations if they are
3461 * reasonable (ie. over 64MB)
3462 */
3463 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3464 arc_c_max = zfs_arc_max;
3465 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3466 arc_c_min = zfs_arc_min;
3467
3468 arc_c = arc_c_max;
3469 arc_p = (arc_c >> 1);
3470
3471 /* limit meta-data to 1/4 of the arc capacity */
3472 arc_meta_limit = arc_c_max / 4;
3473
3474 /* Allow the tunable to override if it is reasonable */
3475 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3476 arc_meta_limit = zfs_arc_meta_limit;
3477
3478 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3479 arc_c_min = arc_meta_limit / 2;
3480
3481 if (zfs_arc_grow_retry > 0)
3482 arc_grow_retry = zfs_arc_grow_retry;
3483
3484 if (zfs_arc_shrink_shift > 0)
3485 arc_shrink_shift = zfs_arc_shrink_shift;
3486
3487 if (zfs_arc_p_min_shift > 0)
3488 arc_p_min_shift = zfs_arc_p_min_shift;
3489
3490 /* if kmem_flags are set, lets try to use less memory */
3491 if (kmem_debugging())
3492 arc_c = arc_c / 2;
3493 if (arc_c < arc_c_min)
3494 arc_c = arc_c_min;
3495
3496 arc_anon = &ARC_anon;
3497 arc_mru = &ARC_mru;
3498 arc_mru_ghost = &ARC_mru_ghost;
3499 arc_mfu = &ARC_mfu;
3500 arc_mfu_ghost = &ARC_mfu_ghost;
3501 arc_l2c_only = &ARC_l2c_only;
3502 arc_size = 0;
3503
3504 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3505 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3506 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3507 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3508 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3509 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3510
3511 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3512 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3513 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3514 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3515 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3516 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3517 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3518 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3519 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3520 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3521 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3522 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3523 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3524 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3525 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3526 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3527 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3528 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3529 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3530 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3531
3532 buf_init();
3533
3534 arc_thread_exit = 0;
3535 arc_eviction_list = NULL;
3536 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3537 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3538
3539 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3540 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3541
3542 if (arc_ksp != NULL) {
3543 arc_ksp->ks_data = &arc_stats;
3544 kstat_install(arc_ksp);
3545 }
3546
3547 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3548 TS_RUN, minclsyspri);
3549
3550 arc_dead = FALSE;
3551 arc_warm = B_FALSE;
3552
3553 if (zfs_write_limit_max == 0)
3554 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3555 else
3556 zfs_write_limit_shift = 0;
3557 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3558 }
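
/*
 * The tunables consulted above (zfs_arc_max, zfs_arc_min,
 * zfs_arc_meta_limit, zfs_arc_grow_retry, zfs_arc_shrink_shift,
 * zfs_arc_p_min_shift) are ordinary globals, so on OpenSolaris-derived
 * systems they can be set from /etc/system, for example:
 *
 *	set zfs:zfs_arc_max = 0x40000000	(cap the ARC at 1 GB)
 *
 * Values outside the sanity ranges checked above are ignored.
 */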
3559
3560 void
3561 arc_fini(void)
3562 {
3563 mutex_enter(&arc_reclaim_thr_lock);
3564 arc_thread_exit = 1;
3565 while (arc_thread_exit != 0)
3566 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3567 mutex_exit(&arc_reclaim_thr_lock);
3568
3569 arc_flush(NULL);
3570
3571 arc_dead = TRUE;
3572
3573 if (arc_ksp != NULL) {
3574 kstat_delete(arc_ksp);
3575 arc_ksp = NULL;
3576 }
3577
3578 mutex_destroy(&arc_eviction_mtx);
3579 mutex_destroy(&arc_reclaim_thr_lock);
3580 cv_destroy(&arc_reclaim_thr_cv);
3581
3582 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3583 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3584 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3585 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3586 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3587 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3588 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3589 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3590
3591 mutex_destroy(&arc_anon->arcs_mtx);
3592 mutex_destroy(&arc_mru->arcs_mtx);
3593 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3594 mutex_destroy(&arc_mfu->arcs_mtx);
3595 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3596 mutex_destroy(&arc_l2c_only->arcs_mtx);
3597
3598 mutex_destroy(&zfs_write_limit_lock);
3599
3600 buf_fini();
3601
3602 ASSERT(arc_loaned_bytes == 0);
3603 }
3604
3605 /*
3606 * Level 2 ARC
3607 *
3608 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3609 * It uses dedicated storage devices, populated by large, infrequent
3610 * writes, to hold cached data. The main role of this cache is to boost
3611 * the performance of random read workloads. The intended L2ARC devices
3612 * include short-stroked disks, solid state disks, and other media with
3613 * substantially lower read latency than disk.
3614 *
3615 * +-----------------------+
3616 * | ARC |
3617 * +-----------------------+
3618 * | ^ ^
3619 * | | |
3620 * l2arc_feed_thread() arc_read()
3621 * | | |
3622 * | l2arc read |
3623 * V | |
3624 * +---------------+ |
3625 * | L2ARC | |
3626 * +---------------+ |
3627 * | ^ |
3628 * l2arc_write() | |
3629 * | | |
3630 * V | |
3631 * +-------+ +-------+
3632 * | vdev | | vdev |
3633 * | cache | | cache |
3634 * +-------+ +-------+
3635 * +=========+ .-----.
3636 * : L2ARC : |-_____-|
3637 * : devices : | Disks |
3638 * +=========+ `-_____-'
3639 *
3640 * Read requests are satisfied from the following sources, in order:
3641 *
3642 * 1) ARC
3643 * 2) vdev cache of L2ARC devices
3644 * 3) L2ARC devices
3645 * 4) vdev cache of disks
3646 * 5) disks
3647 *
3648 * Some L2ARC device types exhibit extremely slow write performance.
3649 * To accommodate this, there are some significant differences between
3650 * the L2ARC and a traditional cache design:
3651 *
3652 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3653 * the ARC behave as usual, freeing buffers and placing headers on ghost
3654 * lists. The ARC does not send buffers to the L2ARC during eviction as
3655 * this would inflate write latencies under any ARC memory pressure.
3656 *
3657 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3658 * It does this by periodically scanning buffers from the eviction-end of
3659 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3660 * not already there. It scans until a headroom of buffers is satisfied,
3661 * which itself cushions against ARC eviction. The thread that does this is
3662 * l2arc_feed_thread(), illustrated below; example sizes are included to
3663 * give a better sense of scale than the diagram alone:
3664 *
3665 * head --> tail
3666 * +---------------------+----------+
3667 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3668 * +---------------------+----------+ | o L2ARC eligible
3669 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3670 * +---------------------+----------+ |
3671 * 15.9 Gbytes ^ 32 Mbytes |
3672 * headroom |
3673 * l2arc_feed_thread()
3674 * |
3675 * l2arc write hand <--[oooo]--'
3676 * | 8 Mbyte
3677 * | write max
3678 * V
3679 * +==============================+
3680 * L2ARC dev |####|#|###|###| |####| ... |
3681 * +==============================+
3682 * 32 Gbytes
3683 *
3684 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3685 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3686 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3687 * safe to say that this is an uncommon case, since buffers at the end of
3688 * the ARC lists have moved there due to inactivity.
3689 *
3690 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3691 * then the L2ARC simply misses copying some buffers. This serves as a
3692 * pressure valve to prevent heavy read workloads from both stalling the ARC
3693 * with waits and clogging the L2ARC with writes. This also helps prevent
3694 * the L2ARC from churning when it attempts to cache content too
3695 * quickly, such as during backups of the entire pool.
3696 *
3697 * 5. After system boot and before the ARC has filled main memory, there are
3698 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3699 * lists can remain mostly static. Instead of searching from tail of these
3700 * lists as pictured, the l2arc_feed_thread() will search from the list heads
3701 * for eligible buffers, greatly increasing its chance of finding them.
3702 *
3703 * The L2ARC device write speed is also boosted during this time so that
3704 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3705 * there are no L2ARC reads, and no fear of degrading read performance
3706 * through increased writes.
3707 *
3708 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3709 * the vdev queue can aggregate them into larger and fewer writes. Each
3710 * device is written to in a rotor fashion, sweeping writes through
3711 * available space, then repeating.
3712 *
3713 * 7. The L2ARC does not store dirty content. It never needs to flush
3714 * write buffers back to disk-based storage.
3715 *
3716 * 8. If an ARC buffer that also exists in the L2ARC is written (and
3717 * dirtied), the now-stale L2ARC buffer is immediately dropped.
3718 *
3719 * The performance of the L2ARC can be tweaked by a number of tunables;
3720 * different workloads may call for different settings:
3721 *
3722 * l2arc_write_max max write bytes per interval
3723 * l2arc_write_boost extra write bytes during device warmup
3724 * l2arc_noprefetch skip caching prefetched buffers
3725 * l2arc_headroom number of max device writes to precache
3726 * l2arc_feed_secs seconds between L2ARC writing
3727 *
3728 * Tunables may be removed or added as future performance improvements are
3729 * integrated, and also may become zpool properties.
3730 *
3731 * There are three key functions that control how the L2ARC warms up:
3732 *
3733 * l2arc_write_eligible() check if a buffer is eligible to cache
3734 * l2arc_write_size() calculate how much to write
3735 * l2arc_write_interval() calculate sleep delay between writes
3736 *
3737 * These three functions determine what to write, how much, and how quickly
3738 * to send writes.
3739 */
3740
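/*
 * Illustrative arithmetic, assuming this build's defaults of
 * l2arc_write_max == 8 MB, l2arc_write_boost == 8 MB, l2arc_headroom == 2
 * and l2arc_feed_secs == 1: while arc_warm is B_FALSE each feed interval
 * may write 8 + 8 == 16 MB and scan up to 16 MB * 2 == 32 MB from the
 * list heads; once warm, this drops to 8 MB written per second, scanning
 * at most 16 MB from the tails.
 */
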
3741 static boolean_t
3742 l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
3743 {
3744 /*
3745 * A buffer is *not* eligible for the L2ARC if it:
3746 * 1. belongs to a different spa.
3747 * 2. is already cached on the L2ARC.
3748 * 3. has an I/O in progress (it may be an incomplete read).
3749 * 4. is flagged not eligible (zfs property).
3750 */
3751 if (ab->b_spa != spa_guid || ab->b_l2hdr != NULL ||
3752 HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab))
3753 return (B_FALSE);
3754
3755 return (B_TRUE);
3756 }
3757
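/*
 * Determine the target write size for this interval: the device's
 * per-interval maximum, plus the warmup boost while the ARC has not yet
 * filled main memory (arc_warm == B_FALSE).
 */
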
3758 static uint64_t
3759 l2arc_write_size(l2arc_dev_t *dev)
3760 {
3761 uint64_t size;
3762
3763 size = dev->l2ad_write;
3764
3765 if (arc_warm == B_FALSE)
3766 size += dev->l2ad_boost;
3767
3768 return (size);
3770 }
3771
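/*
 * A worked example, assuming defaults of l2arc_feed_secs == 1 and
 * l2arc_feed_min_ms == 200 (with l2arc_feed_again left enabled): if the
 * previous pass wanted 16 MB but wrote only 4 MB, the lists are stale
 * and the next write is scheduled a full second after the previous one
 * began; if it wrote 12 MB (more than half of what was wanted), the next
 * write is scheduled only 200 ms after the previous one began, though
 * never earlier than now.
 */
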
3772 static clock_t
3773 l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
3774 {
3775 clock_t interval, next, now;
3776
3777 /*
3778 * If the ARC lists are busy, increase our write rate; if the
3779 * lists are stale, idle back. This is achieved by checking
3780 * how much we previously wrote - if it was more than half of
3781 * what we wanted, schedule the next write much sooner.
3782 */
3783 if (l2arc_feed_again && wrote > (wanted / 2))
3784 interval = (hz * l2arc_feed_min_ms) / 1000;
3785 else
3786 interval = hz * l2arc_feed_secs;
3787
3788 now = ddi_get_lbolt();
3789 next = MAX(now, MIN(now + interval, began + interval));
3790
3791 return (next);
3792 }
3793
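/*
 * Header-size accounting: when a header gains an L2ARC presence its size
 * moves from arcstat_hdr_size to arcstat_l2_hdr_size (plus the L2-only
 * portion), and moves back on removal, keeping the two kstats disjoint.
 */
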
3794 static void
3795 l2arc_hdr_stat_add(void)
3796 {
3797 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3798 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3799 }
3800
3801 static void
3802 l2arc_hdr_stat_remove(void)
3803 {
3804 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3805 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3806 }
3807
3808 /*
3809 * Cycle through L2ARC devices. This is how the L2ARC load balances.
3810 * If a device is returned, it is returned with the spa config lock held.
3811 */
3812 static l2arc_dev_t *
3813 l2arc_dev_get_next(void)
3814 {
3815 l2arc_dev_t *first, *next = NULL;
3816
3817 /*
3818 * Lock out the removal of spas (spa_namespace_lock), then removal
3819 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3820 * both locks will be dropped and a spa config lock held instead.
3821 */
3822 mutex_enter(&spa_namespace_lock);
3823 mutex_enter(&l2arc_dev_mtx);
3824
3825 /* if there are no vdevs, there is nothing to do */
3826 if (l2arc_ndev == 0)
3827 goto out;
3828
3829 first = NULL;
3830 next = l2arc_dev_last;
3831 do {
3832 /* loop around the list looking for a non-faulted vdev */
3833 if (next == NULL) {
3834 next = list_head(l2arc_dev_list);
3835 } else {
3836 next = list_next(l2arc_dev_list, next);
3837 if (next == NULL)
3838 next = list_head(l2arc_dev_list);
3839 }
3840
3841 /* if we have come back to the start, bail out */
3842 if (first == NULL)
3843 first = next;
3844 else if (next == first)
3845 break;
3846
3847 } while (vdev_is_dead(next->l2ad_vdev));
3848
3849 /* if we were unable to find any usable vdevs, return NULL */
3850 if (vdev_is_dead(next->l2ad_vdev))
3851 next = NULL;
3852
3853 l2arc_dev_last = next;
3854
3855 out:
3856 mutex_exit(&l2arc_dev_mtx);
3857
3858 /*
3859 * Grab the config lock to prevent the 'next' device from being
3860 * removed while we are writing to it.
3861 */
3862 if (next != NULL)
3863 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3864 mutex_exit(&spa_namespace_lock);
3865
3866 return (next);
3867 }
3868
3869 /*
3870 * Free buffers that were tagged for destruction.
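 * These are buffers whose frees were deferred because an L2ARC write
 * was still in flight when they were destroyed; once the write has
 * completed it is safe to run the queued free functions.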
3871 */
3872 static void
3873 l2arc_do_free_on_write(void)
3874 {
3875 list_t *buflist;
3876 l2arc_data_free_t *df, *df_prev;
3877
3878 mutex_enter(&l2arc_free_on_write_mtx);
3879 buflist = l2arc_free_on_write;
3880
3881 for (df = list_tail(buflist); df; df = df_prev) {
3882 df_prev = list_prev(buflist, df);
3883 ASSERT(df->l2df_data != NULL);
3884 ASSERT(df->l2df_func != NULL);
3885 df->l2df_func(df->l2df_data, df->l2df_size);
3886 list_remove(buflist, df);
3887 kmem_free(df, sizeof (l2arc_data_free_t));
3888 }
3889
3890 mutex_exit(&l2arc_free_on_write_mtx);
3891 }
3892
3893 /*
3894 * A write to a cache device has completed. Update all headers to allow
3895 * reads from these buffers to begin.
3896 */
3897 static void
3898 l2arc_write_done(zio_t *zio)
3899 {
3900 l2arc_write_callback_t *cb;
3901 l2arc_dev_t *dev;
3902 list_t *buflist;
3903 arc_buf_hdr_t *head, *ab, *ab_prev;
3904 l2arc_buf_hdr_t *abl2;
3905 kmutex_t *hash_lock;
3906
3907 cb = zio->io_private;
3908 ASSERT(cb != NULL);
3909 dev = cb->l2wcb_dev;
3910 ASSERT(dev != NULL);
3911 head = cb->l2wcb_head;
3912 ASSERT(head != NULL);
3913 buflist = dev->l2ad_buflist;
3914 ASSERT(buflist != NULL);
3915 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3916 l2arc_write_callback_t *, cb);
3917
3918 if (zio->io_error != 0)
3919 ARCSTAT_BUMP(arcstat_l2_writes_error);
3920
3921 mutex_enter(&l2arc_buflist_mtx);
3922
3923 /*
3924 * All writes completed, or an error was hit.
3925 */
3926 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3927 ab_prev = list_prev(buflist, ab);
3928
3929 hash_lock = HDR_LOCK(ab);
3930 if (!mutex_tryenter(hash_lock)) {
3931 /*
3932 * This buffer misses out. It may be in the midst
3933 * of eviction. Its ARC_L2_WRITING flag will be
3934 * left set, denying reads to this buffer.
3935 */
3936 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3937 continue;
3938 }
3939
3940 if (zio->io_error != 0) {
3941 /*
3942 * Error - drop L2ARC entry.
3943 */
3944 list_remove(buflist, ab);
3945 abl2 = ab->b_l2hdr;
3946 ab->b_l2hdr = NULL;
3947 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3948 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3949 }
3950
3951 /*
3952 * Allow ARC to begin reads to this L2ARC entry.
3953 */
3954 ab->b_flags &= ~ARC_L2_WRITING;
3955
3956 mutex_exit(hash_lock);
3957 }
3958
3959 atomic_inc_64(&l2arc_writes_done);
3960 list_remove(buflist, head);
3961 kmem_cache_free(hdr_cache, head);
3962 mutex_exit(&l2arc_buflist_mtx);
3963
3964 l2arc_do_free_on_write();
3965
3966 kmem_free(cb, sizeof (l2arc_write_callback_t));
3967 }
3968
3969 /*
3970 * A read to a cache device completed. Validate buffer contents before
3971 * handing over to the regular ARC routines.
3972 */
3973 static void
3974 l2arc_read_done(zio_t *zio)
3975 {
3976 l2arc_read_callback_t *cb;
3977 arc_buf_hdr_t *hdr;
3978 arc_buf_t *buf;
3979 kmutex_t *hash_lock;
3980 int equal;
3981
3982 ASSERT(zio->io_vd != NULL);
3983 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
3984
3985 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
3986
3987 cb = zio->io_private;
3988 ASSERT(cb != NULL);
3989 buf = cb->l2rcb_buf;
3990 ASSERT(buf != NULL);
3991
3992 hash_lock = HDR_LOCK(buf->b_hdr);
3993 mutex_enter(hash_lock);
3994 hdr = buf->b_hdr;
3995 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3996
3997 /*
3998 * Check that this read survived the L2ARC journey.
3999 */
4000 equal = arc_cksum_equal(buf);
4001 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4002 mutex_exit(hash_lock);
4003 zio->io_private = buf;
4004 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4005 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4006 arc_read_done(zio);
4007 } else {
4008 mutex_exit(hash_lock);
4009 /*
4010 * Buffer didn't survive caching. Increment stats and
4011 * reissue to the original storage device.
4012 */
4013 if (zio->io_error != 0) {
4014 ARCSTAT_BUMP(arcstat_l2_io_error);
4015 } else {
4016 zio->io_error = EIO;
4017 }
4018 if (!equal)
4019 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4020
4021 /*
4022 * If there's no waiter, issue an async i/o to the primary
4023 * storage now. If there *is* a waiter, the caller must
4024 * issue the i/o in a context where it's OK to block.
4025 */
4026 if (zio->io_waiter == NULL) {
4027 zio_t *pio = zio_unique_parent(zio);
4028
4029 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4030
4031 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4032 buf->b_data, zio->io_size, arc_read_done, buf,
4033 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4034 }
4035 }
4036
4037 kmem_free(cb, sizeof (l2arc_read_callback_t));
4038 }
4039
4040 /*
4041 * This is the list priority order in which the L2ARC searches for pages to
4042 * cache. It is used within loops (0..3) to cycle through the lists in the
4043 * desired order. This order can have a significant effect on cache
4044 * performance.
4045 *
4046 * Currently the metadata lists are hit first, MFU then MRU, followed by
4047 * the data lists. This function returns a locked list, and also returns
4048 * the lock pointer.
4049 */
4050 static list_t *
4051 l2arc_list_locked(int list_num, kmutex_t **lock)
4052 {
4053 list_t *list;
4054
4055 ASSERT(list_num >= 0 && list_num <= 3);
4056
4057 switch (list_num) {
4058 case 0:
4059 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
4060 *lock = &arc_mfu->arcs_mtx;
4061 break;
4062 case 1:
4063 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
4064 *lock = &arc_mru->arcs_mtx;
4065 break;
4066 case 2:
4067 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
4068 *lock = &arc_mfu->arcs_mtx;
4069 break;
4070 case 3:
4071 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
4072 *lock = &arc_mru->arcs_mtx;
4073 break;
4074 }
4075
4076 ASSERT(!(MUTEX_HELD(*lock)));
4077 mutex_enter(*lock);
4078 return (list);
4079 }
4080
4081 /*
4082 * Evict buffers from the device write hand to the distance specified in
4083 * bytes. This distance may span populated buffers, or it may span nothing.
4084 * This is clearing a region on the L2ARC device ready for writing.
4085 * If the 'all' boolean is set, every buffer is evicted.
4086 */
4087 static void
4088 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4089 {
4090 list_t *buflist;
4091 l2arc_buf_hdr_t *abl2;
4092 arc_buf_hdr_t *ab, *ab_prev;
4093 kmutex_t *hash_lock;
4094 uint64_t taddr;
4095
4096 buflist = dev->l2ad_buflist;
4097
4098 if (buflist == NULL)
4099 return;
4100
4101 if (!all && dev->l2ad_first) {
4102 /*
4103 * This is the first sweep through the device. There is
4104 * nothing to evict.
4105 */
4106 return;
4107 }
4108
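/*
 * Note the factor of two below: evicting ahead by twice the requested
 * distance presumably keeps an extra write's worth of space clear
 * beyond the hand before it wraps to the device start.
 */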
4109 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4110 /*
4111 * When nearing the end of the device, evict to the end
4112 * before the device write hand jumps to the start.
4113 */
4114 taddr = dev->l2ad_end;
4115 } else {
4116 taddr = dev->l2ad_hand + distance;
4117 }
4118 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4119 uint64_t, taddr, boolean_t, all);
4120
4121 top:
4122 mutex_enter(&l2arc_buflist_mtx);
4123 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4124 ab_prev = list_prev(buflist, ab);
4125
4126 hash_lock = HDR_LOCK(ab);
4127 if (!mutex_tryenter(hash_lock)) {
4128 /*
4129 * Missed the hash lock. Retry.
4130 */
4131 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4132 mutex_exit(&l2arc_buflist_mtx);
4133 mutex_enter(hash_lock);
4134 mutex_exit(hash_lock);
4135 goto top;
4136 }
4137
4138 if (HDR_L2_WRITE_HEAD(ab)) {
4139 /*
4140 * We hit a write head node. Leave it for
4141 * l2arc_write_done().
4142 */
4143 list_remove(buflist, ab);
4144 mutex_exit(hash_lock);
4145 continue;
4146 }
4147
4148 if (!all && ab->b_l2hdr != NULL &&
4149 (ab->b_l2hdr->b_daddr > taddr ||
4150 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4151 /*
4152 * We've evicted to the target address,
4153 * or the end of the device.
4154 */
4155 mutex_exit(hash_lock);
4156 break;
4157 }
4158
4159 if (HDR_FREE_IN_PROGRESS(ab)) {
4160 /*
4161 * Already on the path to destruction.
4162 */
4163 mutex_exit(hash_lock);
4164 continue;
4165 }
4166
4167 if (ab->b_state == arc_l2c_only) {
4168 ASSERT(!HDR_L2_READING(ab));
4169 /*
4170 * This doesn't exist in the ARC. Destroy.
4171 * arc_hdr_destroy() will call list_remove()
4172 * and decrement arcstat_l2_size.
4173 */
4174 arc_change_state(arc_anon, ab, hash_lock);
4175 arc_hdr_destroy(ab);
4176 } else {
4177 /*
4178 * Invalidate issued or about to be issued
4179 * reads, since we may be about to write
4180 * over this location.
4181 */
4182 if (HDR_L2_READING(ab)) {
4183 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4184 ab->b_flags |= ARC_L2_EVICTED;
4185 }
4186
4187 /*
4188 * Tell ARC this no longer exists in L2ARC.
4189 */
4190 if (ab->b_l2hdr != NULL) {
4191 abl2 = ab->b_l2hdr;
4192 ab->b_l2hdr = NULL;
4193 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4194 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4195 }
4196 list_remove(buflist, ab);
4197
4198 /*
4199 * This may have been leftover after a
4200 * failed write.
4201 */
4202 ab->b_flags &= ~ARC_L2_WRITING;
4203 }
4204 mutex_exit(hash_lock);
4205 }
4206 mutex_exit(&l2arc_buflist_mtx);
4207
4208 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4209 dev->l2ad_evict = taddr;
4210 }
4211
4212 /*
4213 * Find and write ARC buffers to the L2ARC device.
4214 *
4215 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4216 * for reading until they have completed writing.
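 *
 * Returns the number of device bytes written this pass (possibly zero);
 * the caller uses this to pace the next feed interval.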
4217 */
4218 static uint64_t
4219 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4220 {
4221 arc_buf_hdr_t *ab, *ab_prev, *head;
4222 l2arc_buf_hdr_t *hdrl2;
4223 list_t *list;
4224 uint64_t passed_sz, write_sz, buf_sz, headroom;
4225 void *buf_data;
4226 kmutex_t *hash_lock, *list_lock;
4227 boolean_t have_lock, full;
4228 l2arc_write_callback_t *cb;
4229 zio_t *pio, *wzio;
4230 uint64_t guid = spa_guid(spa);
4231
4232 ASSERT(dev->l2ad_vdev != NULL);
4233
4234 pio = NULL;
4235 write_sz = 0;
4236 full = B_FALSE;
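/*
 * The write head is a dummy header marking where this pass's buffers
 * begin on the device buflist; KM_PUSHPAGE is used so the allocation
 * can proceed even under memory pressure.
 */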
4237 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4238 head->b_flags |= ARC_L2_WRITE_HEAD;
4239
4240 /*
4241 * Copy buffers for L2ARC writing.
4242 */
4243 mutex_enter(&l2arc_buflist_mtx);
4244 for (int try = 0; try <= 3; try++) {
4245 list = l2arc_list_locked(try, &list_lock);
4246 passed_sz = 0;
4247
4248 /*
4249 * L2ARC fast warmup.
4250 *
4251 * Until the ARC is warm and starts to evict, read from the
4252 * head of the ARC lists rather than the tail.
4253 */
4254 headroom = target_sz * l2arc_headroom;
4255 if (arc_warm == B_FALSE)
4256 ab = list_head(list);
4257 else
4258 ab = list_tail(list);
4259
4260 for (; ab; ab = ab_prev) {
4261 if (arc_warm == B_FALSE)
4262 ab_prev = list_next(list, ab);
4263 else
4264 ab_prev = list_prev(list, ab);
4265
4266 hash_lock = HDR_LOCK(ab);
4267 have_lock = MUTEX_HELD(hash_lock);
4268 if (!have_lock && !mutex_tryenter(hash_lock)) {
4269 /*
4270 * Skip this buffer rather than waiting.
4271 */
4272 continue;
4273 }
4274
4275 passed_sz += ab->b_size;
4276 if (passed_sz > headroom) {
4277 /*
4278 * Searched too far.
4279 */
4280 mutex_exit(hash_lock);
4281 break;
4282 }
4283
4284 if (!l2arc_write_eligible(guid, ab)) {
4285 mutex_exit(hash_lock);
4286 continue;
4287 }
4288
4289 if ((write_sz + ab->b_size) > target_sz) {
4290 full = B_TRUE;
4291 mutex_exit(hash_lock);
4292 break;
4293 }
4294
4295 if (pio == NULL) {
4296 /*
4297 * Insert a dummy header on the buflist so
4298 * l2arc_write_done() can find where the
4299 * write buffers begin without searching.
4300 */
4301 list_insert_head(dev->l2ad_buflist, head);
4302
4303 cb = kmem_alloc(
4304 sizeof (l2arc_write_callback_t), KM_SLEEP);
4305 cb->l2wcb_dev = dev;
4306 cb->l2wcb_head = head;
4307 pio = zio_root(spa, l2arc_write_done, cb,
4308 ZIO_FLAG_CANFAIL);
4309 }
4310
4311 /*
4312 * Create and add a new L2ARC header.
4313 */
4314 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4315 hdrl2->b_dev = dev;
4316 hdrl2->b_daddr = dev->l2ad_hand;
4317
4318 ab->b_flags |= ARC_L2_WRITING;
4319 ab->b_l2hdr = hdrl2;
4320 list_insert_head(dev->l2ad_buflist, ab);
4321 buf_data = ab->b_buf->b_data;
4322 buf_sz = ab->b_size;
4323
4324 /*
4325 * Compute and store the buffer cksum before
4326 * writing. On debug the cksum is verified first.
4327 */
4328 arc_cksum_verify(ab->b_buf);
4329 arc_cksum_compute(ab->b_buf, B_TRUE);
4330
4331 mutex_exit(hash_lock);
4332
4333 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4334 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4335 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4336 ZIO_FLAG_CANFAIL, B_FALSE);
4337
4338 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4339 zio_t *, wzio);
4340 (void) zio_nowait(wzio);
4341
4342 /*
4343 * Keep the clock hand suitably device-aligned.
4344 */
4345 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4346
4347 write_sz += buf_sz;
4348 dev->l2ad_hand += buf_sz;
4349 }
4350
4351 mutex_exit(list_lock);
4352
4353 if (full == B_TRUE)
4354 break;
4355 }
4356 mutex_exit(&l2arc_buflist_mtx);
4357
4358 if (pio == NULL) {
4359 ASSERT3U(write_sz, ==, 0);
4360 kmem_cache_free(hdr_cache, head);
4361 return (0);
4362 }
4363
4364 ASSERT3U(write_sz, <=, target_sz);
4365 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4366 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
4367 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4368 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
4369
4370 /*
4371 * Bump device hand to the device start if it is approaching the end.
4372 * l2arc_evict() will already have evicted ahead for this case.
4373 */
4374 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4375 vdev_space_update(dev->l2ad_vdev,
4376 dev->l2ad_end - dev->l2ad_hand, 0, 0);
4377 dev->l2ad_hand = dev->l2ad_start;
4378 dev->l2ad_evict = dev->l2ad_start;
4379 dev->l2ad_first = B_FALSE;
4380 }
4381
4382 dev->l2ad_writing = B_TRUE;
4383 (void) zio_wait(pio);
4384 dev->l2ad_writing = B_FALSE;
4385
4386 return (write_sz);
4387 }
4388
4389 /*
4390 * This thread feeds the L2ARC at regular intervals. This is the beating
4391 * heart of the L2ARC.
4392 */
4393 static void
4394 l2arc_feed_thread(void)
4395 {
4396 callb_cpr_t cpr;
4397 l2arc_dev_t *dev;
4398 spa_t *spa;
4399 uint64_t size, wrote;
4400 clock_t begin, next = ddi_get_lbolt();
4401
4402 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4403
4404 mutex_enter(&l2arc_feed_thr_lock);
4405
4406 while (l2arc_thread_exit == 0) {
4407 CALLB_CPR_SAFE_BEGIN(&cpr);
4408 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4409 next);
4410 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
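/*
 * Default next wakeup: one second from now, used whenever this
 * pass bails out below without writing.
 */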
4411 next = ddi_get_lbolt() + hz;
4412
4413 /*
4414 * Quick check for L2ARC devices.
4415 */
4416 mutex_enter(&l2arc_dev_mtx);
4417 if (l2arc_ndev == 0) {
4418 mutex_exit(&l2arc_dev_mtx);
4419 continue;
4420 }
4421 mutex_exit(&l2arc_dev_mtx);
4422 begin = ddi_get_lbolt();
4423
4424 /*
4425 * This selects the next l2arc device to write to, and in
4426 * doing so the next spa to feed from: dev->l2ad_spa. This
4427 * will return NULL if there are now no l2arc devices or if
4428 * they are all faulted.
4429 *
4430 * If a device is returned, its spa's config lock is also
4431 * held to prevent device removal. l2arc_dev_get_next()
4432 * will grab and release l2arc_dev_mtx.
4433 */
4434 if ((dev = l2arc_dev_get_next()) == NULL)
4435 continue;
4436
4437 spa = dev->l2ad_spa;
4438 ASSERT(spa != NULL);
4439
4440 /*
4441 * Avoid contributing to memory pressure.
4442 */
4443 if (arc_reclaim_needed()) {
4444 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4445 spa_config_exit(spa, SCL_L2ARC, dev);
4446 continue;
4447 }
4448
4449 ARCSTAT_BUMP(arcstat_l2_feeds);
4450
4451 size = l2arc_write_size(dev);
4452
4453 /*
4454 * Evict L2ARC buffers that will be overwritten.
4455 */
4456 l2arc_evict(dev, size, B_FALSE);
4457
4458 /*
4459 * Write ARC buffers.
4460 */
4461 wrote = l2arc_write_buffers(spa, dev, size);
4462
4463 /*
4464 * Calculate interval between writes.
4465 */
4466 next = l2arc_write_interval(begin, size, wrote);
4467 spa_config_exit(spa, SCL_L2ARC, dev);
4468 }
4469
4470 l2arc_thread_exit = 0;
4471 cv_broadcast(&l2arc_feed_thr_cv);
4472 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
4473 thread_exit();
4474 }
4475
4476 boolean_t
4477 l2arc_vdev_present(vdev_t *vd)
4478 {
4479 l2arc_dev_t *dev;
4480
4481 mutex_enter(&l2arc_dev_mtx);
4482 for (dev = list_head(l2arc_dev_list); dev != NULL;
4483 dev = list_next(l2arc_dev_list, dev)) {
4484 if (dev->l2ad_vdev == vd)
4485 break;
4486 }
4487 mutex_exit(&l2arc_dev_mtx);
4488
4489 return (dev != NULL);
4490 }
4491
4492 /*
4493 * Add a vdev for use by the L2ARC. By this point the spa has already
4494 * validated the vdev and opened it.
4495 */
4496 void
4497 l2arc_add_vdev(spa_t *spa, vdev_t *vd)
4498 {
4499 l2arc_dev_t *adddev;
4500
4501 ASSERT(!l2arc_vdev_present(vd));
4502
4503 /*
4504 * Create a new l2arc device entry.
4505 */
4506 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4507 adddev->l2ad_spa = spa;
4508 adddev->l2ad_vdev = vd;
4509 adddev->l2ad_write = l2arc_write_max;
4510 adddev->l2ad_boost = l2arc_write_boost;
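/*
 * The usable region skips the front vdev labels and boot block
 * (VDEV_LABEL_START_SIZE) and spans the vdev's minimum asize.
 */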
4511 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
4512 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
4513 adddev->l2ad_hand = adddev->l2ad_start;
4514 adddev->l2ad_evict = adddev->l2ad_start;
4515 adddev->l2ad_first = B_TRUE;
4516 adddev->l2ad_writing = B_FALSE;
4517 ASSERT3U(adddev->l2ad_write, >, 0);
4518
4519 /*
4520 * This is a list of all ARC buffers that are still valid on the
4521 * device.
4522 */
4523 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4524 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4525 offsetof(arc_buf_hdr_t, b_l2node));
4526
4527 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
4528
4529 /*
4530 * Add device to global list
4531 */
4532 mutex_enter(&l2arc_dev_mtx);
4533 list_insert_head(l2arc_dev_list, adddev);
4534 atomic_inc_64(&l2arc_ndev);
4535 mutex_exit(&l2arc_dev_mtx);
4536 }
4537
4538 /*
4539 * Remove a vdev from the L2ARC.
4540 */
4541 void
4542 l2arc_remove_vdev(vdev_t *vd)
4543 {
4544 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4545
4546 /*
4547 * Find the device by vdev
4548 */
4549 mutex_enter(&l2arc_dev_mtx);
4550 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4551 nextdev = list_next(l2arc_dev_list, dev);
4552 if (vd == dev->l2ad_vdev) {
4553 remdev = dev;
4554 break;
4555 }
4556 }
4557 ASSERT(remdev != NULL);
4558
4559 /*
4560 * Remove device from global list
4561 */
4562 list_remove(l2arc_dev_list, remdev);
4563 l2arc_dev_last = NULL; /* may have been invalidated */
4564 atomic_dec_64(&l2arc_ndev);
4565 mutex_exit(&l2arc_dev_mtx);
4566
4567 /*
4568 * Clear all buflists and ARC references; this flushes the L2ARC device.
4569 */
4570 l2arc_evict(remdev, 0, B_TRUE);
4571 list_destroy(remdev->l2ad_buflist);
4572 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4573 kmem_free(remdev, sizeof (l2arc_dev_t));
4574 }
4575
4576 void
4577 l2arc_init(void)
4578 {
4579 l2arc_thread_exit = 0;
4580 l2arc_ndev = 0;
4581 l2arc_writes_sent = 0;
4582 l2arc_writes_done = 0;
4583
4584 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4585 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4586 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4587 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4588 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4589
4590 l2arc_dev_list = &L2ARC_dev_list;
4591 l2arc_free_on_write = &L2ARC_free_on_write;
4592 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4593 offsetof(l2arc_dev_t, l2ad_node));
4594 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4595 offsetof(l2arc_data_free_t, l2df_list_node));
4596 }
4597
4598 void
4599 l2arc_fini(void)
4600 {
4601 /*
4602 * This is called from dmu_fini(), which is called from spa_fini().
4603 * Because of this, we can assume that all l2arc devices have
4604 * already been removed when the pools themselves were removed.
4605 */
4606
4607 l2arc_do_free_on_write();
4608
4609 mutex_destroy(&l2arc_feed_thr_lock);
4610 cv_destroy(&l2arc_feed_thr_cv);
4611 mutex_destroy(&l2arc_dev_mtx);
4612 mutex_destroy(&l2arc_buflist_mtx);
4613 mutex_destroy(&l2arc_free_on_write_mtx);
4614
4615 list_destroy(l2arc_dev_list);
4616 list_destroy(l2arc_free_on_write);
4617 }
4618
4619 void
4620 l2arc_start(void)
4621 {
4622 if (!(spa_mode_global & FWRITE))
4623 return;
4624
4625 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4626 TS_RUN, minclsyspri);
4627 }
4628
4629 void
4630 l2arc_stop(void)
4631 {
4632 if (!(spa_mode_global & FWRITE))
4633 return;
4634
4635 mutex_enter(&l2arc_feed_thr_lock);
4636 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
4637 l2arc_thread_exit = 1;
4638 while (l2arc_thread_exit != 0)
4639 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4640 mutex_exit(&l2arc_feed_thr_lock);
4641 }