1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
24 */
25
26 /*
27 * DVA-based Adjustable Replacement Cache
28 *
29 * While much of the theory of operation used here is
30 * based on the self-tuning, low overhead replacement cache
31 * presented by Megiddo and Modha at FAST 2003, there are some
32 * significant differences:
33 *
34 * 1. The Megiddo and Modha model assumes any page is evictable.
35 * Pages in its cache cannot be "locked" into memory. This makes
36 * the eviction algorithm simple: evict the last page in the list.
37 * This also makes the performance characteristics easy to reason
38 * about. Our cache is not so simple. At any given moment, some
39 * subset of the blocks in the cache are un-evictable because we
40 * have handed out a reference to them. Blocks are only evictable
41 * when there are no external references active. This makes
42 * eviction far more problematic: we choose to evict the evictable
43 * blocks that are the "lowest" in the list.
44 *
45 * There are times when it is not possible to evict the requested
46 * space. In these circumstances we are unable to adjust the cache
47 * size. To prevent the cache growing unbounded at these times we
48 * implement a "cache throttle" that slows the flow of new data
49 * into the cache until we can make space available.
50 *
51 * 2. The Megiddo and Modha model assumes a fixed cache size.
52 * Pages are evicted when the cache is full and there is a cache
53 * miss. Our model has a variable sized cache. It grows with
54 * high use, but also tries to react to memory pressure from the
55 * operating system: decreasing its size when system memory is
56 * tight.
57 *
58 * 3. The Megiddo and Modha model assumes a fixed page size. All
59 * elements of the cache are therefore exactly the same size. So
60 * when adjusting the cache size following a cache miss, it's simply
61 * a matter of choosing a single page to evict. In our model, we
62 * have variable sized cache blocks (ranging from 512 bytes to
63 * 128K bytes). We therefore choose a set of blocks to evict to make
64 * space for a cache miss that approximates as closely as possible
65 * the space used by the new block.
66 *
67 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
68 * by N. Megiddo & D. Modha, FAST 2003
69 */
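/*
 * An illustrative sketch of point 3 above (the numbers are
 * hypothetical): because cache blocks range from 512 bytes to 128K, a
 * single miss may require evicting several blocks. A miss that needs
 * 128K of space might be satisfied by evicting one 64K block, one 48K
 * block and one 16K block -- the set whose total most closely
 * approximates the space required -- rather than any single "last
 * page" as in the Megiddo/Modha model.
 */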
70
71 /*
72 * The locking model:
73 *
74 * A new reference to a cache buffer can be obtained in two
75 * ways: 1) via a hash table lookup using the DVA as a key,
76 * or 2) via one of the ARC lists. The arc_read() interface
77 * uses method 1, while the internal arc algorithms for
78 * adjusting the cache use method 2. We therefore provide two
79 * types of locks: 1) the hash table lock array, and 2) the
80 * arc list locks.
81 *
82 * Buffers do not have their own mutexes; rather, they rely on the
83 * hash table mutexes for the bulk of their protection (i.e. most
84 * fields in the arc_buf_hdr_t are protected by these mutexes).
85 *
86 * buf_hash_find() returns the appropriate mutex (held) when it
87 * locates the requested buffer in the hash table. It returns
88 * NULL for the mutex if the buffer was not in the table.
89 *
90 * buf_hash_remove() expects the appropriate hash mutex to be
91 * already held before it is invoked.
92 *
93 * Each arc state also has a mutex which is used to protect the
94 * buffer list associated with the state. When attempting to
95 * obtain a hash table lock while holding an arc list lock you
96 * must use mutex_tryenter() to avoid deadlock. Also note that
97 * the active state mutex must be held before the ghost state mutex.
98 *
99 * Arc buffers may have an associated eviction callback function.
100 * This function will be invoked prior to removing the buffer (e.g.
101 * in arc_do_user_evicts()). Note however that the data associated
102 * with the buffer may be evicted prior to the callback. The callback
103 * must be made with *no locks held* (to prevent deadlock). Additionally,
104 * the users of callbacks must ensure that their private data is
105 * protected from simultaneous callbacks from arc_buf_evict()
106 * and arc_do_user_evicts().
107 *
108 * Note that the majority of the performance stats are manipulated
109 * with atomic operations.
110 *
111 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
112 *
113 * - L2ARC buflist creation
114 * - L2ARC buflist eviction
115 * - L2ARC write completion, which walks L2ARC buflists
116 * - ARC header destruction, as it removes from L2ARC buflists
117 * - ARC header release, as it removes from L2ARC buflists
118 */
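/*
 * A minimal sketch of the method-1 locking pattern described above
 * (hypothetical caller; error handling omitted). buf_hash_find(),
 * defined below, returns with the appropriate hash mutex held, so the
 * caller only has to drop it:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(spa, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... examine or update hash-lock protected fields ...
 *		mutex_exit(hash_lock);
 *	}
 */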
119
120 #include <sys/spa.h>
121 #include <sys/zio.h>
122 #include <sys/zio_checksum.h>
123 #include <sys/zfs_context.h>
124 #include <sys/arc.h>
125 #include <sys/refcount.h>
126 #include <sys/vdev.h>
127 #ifdef _KERNEL
128 #include <sys/vmsystm.h>
129 #include <vm/anon.h>
130 #include <sys/fs/swapnode.h>
131 #include <sys/dnlc.h>
132 #endif
133 #include <sys/callb.h>
134 #include <sys/kstat.h>
135
136 static kmutex_t arc_reclaim_thr_lock;
137 static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
138 static uint8_t arc_thread_exit;
139
140 extern int zfs_write_limit_shift;
141 extern uint64_t zfs_write_limit_max;
142 extern kmutex_t zfs_write_limit_lock;
143
144 #define ARC_REDUCE_DNLC_PERCENT 3
145 uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
146
147 typedef enum arc_reclaim_strategy {
148 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
149 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
150 } arc_reclaim_strategy_t;
151
152 /* number of seconds before growing cache again */
153 static int arc_grow_retry = 60;
154
155 /*
156 * minimum lifespan of a prefetch block in clock ticks
157 * (initialized in arc_init())
158 */
159 static int arc_min_prefetch_lifespan;
160
161 static int arc_dead;
162
163 /*
164 * The arc has filled available memory and has now warmed up.
165 */
166 static boolean_t arc_warm;
167
168 /*
169 * These tunables are for performance analysis.
170 */
171 uint64_t zfs_arc_max;
172 uint64_t zfs_arc_min;
173 uint64_t zfs_arc_meta_limit = 0;
174 int zfs_mdcomp_disable = 0;
175
176 /*
177 * Note that buffers can be in one of 6 states:
178 * ARC_anon - anonymous (discussed below)
179 * ARC_mru - recently used, currently cached
180 * ARC_mru_ghost - recently used, no longer in cache
181 * ARC_mfu - frequently used, currently cached
182 * ARC_mfu_ghost - frequently used, no longer in cache
183 * ARC_l2c_only - exists in L2ARC but not other states
184 * When there are no active references to a buffer, it is
185 * linked onto a list in one of these arc states. These are
186 * the only buffers that can be evicted or deleted. Within each
187 * state there are multiple lists, one for meta-data and one for
188 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
189 * etc.) is tracked separately so that it can be managed more
190 * explicitly: favored over data, limited explicitly.
191 *
192 * Anonymous buffers are buffers that are not associated with
193 * a DVA. These are buffers that hold dirty block copies
194 * before they are written to stable storage. By definition,
195 * they are "ref'd" and are considered part of arc_mru
196 * that cannot be freed. Generally, they will acquire a DVA
197 * as they are written and migrate onto the arc_mru list.
198 *
199 * The ARC_l2c_only state is for buffers that are in the second
200 * level ARC but no longer in any of the ARC_m* lists. The second
201 * level ARC itself may also contain buffers that are in any of
202 * the ARC_m* states - meaning that a buffer can exist in two
203 * places. The reason for the ARC_l2c_only state is to keep the
204 * buffer header in the hash table, so that reads that hit the
205 * second level ARC benefit from these fast lookups.
206 */
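/*
 * A simplified lifecycle pieced together from the description above
 * (illustrative; the transitions are handled by arc_access() and
 * arc_change_state() below):
 *
 *	anon --(write assigns DVA)--> mru --(second access)--> mfu
 *	mru --(data evicted)--> mru_ghost
 *	mfu --(data evicted)--> mfu_ghost
 *	any --(data survives only on the L2ARC device)--> l2c_only
 */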
207
208 typedef struct arc_state {
209 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */
210 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
211 uint64_t arcs_size; /* total amount of data in this state */
212 kmutex_t arcs_mtx;
213 } arc_state_t;
214
215 /* The 6 states: */
216 static arc_state_t ARC_anon;
217 static arc_state_t ARC_mru;
218 static arc_state_t ARC_mru_ghost;
219 static arc_state_t ARC_mfu;
220 static arc_state_t ARC_mfu_ghost;
221 static arc_state_t ARC_l2c_only;
222
223 typedef struct arc_stats {
224 kstat_named_t arcstat_hits;
225 kstat_named_t arcstat_misses;
226 kstat_named_t arcstat_demand_data_hits;
227 kstat_named_t arcstat_demand_data_misses;
228 kstat_named_t arcstat_demand_metadata_hits;
229 kstat_named_t arcstat_demand_metadata_misses;
230 kstat_named_t arcstat_prefetch_data_hits;
231 kstat_named_t arcstat_prefetch_data_misses;
232 kstat_named_t arcstat_prefetch_metadata_hits;
233 kstat_named_t arcstat_prefetch_metadata_misses;
234 kstat_named_t arcstat_mru_hits;
235 kstat_named_t arcstat_mru_ghost_hits;
236 kstat_named_t arcstat_mfu_hits;
237 kstat_named_t arcstat_mfu_ghost_hits;
238 kstat_named_t arcstat_deleted;
239 kstat_named_t arcstat_recycle_miss;
240 kstat_named_t arcstat_mutex_miss;
241 kstat_named_t arcstat_evict_skip;
242 kstat_named_t arcstat_hash_elements;
243 kstat_named_t arcstat_hash_elements_max;
244 kstat_named_t arcstat_hash_collisions;
245 kstat_named_t arcstat_hash_chains;
246 kstat_named_t arcstat_hash_chain_max;
247 kstat_named_t arcstat_p;
248 kstat_named_t arcstat_c;
249 kstat_named_t arcstat_c_min;
250 kstat_named_t arcstat_c_max;
251 kstat_named_t arcstat_size;
252 kstat_named_t arcstat_hdr_size;
253 kstat_named_t arcstat_l2_hits;
254 kstat_named_t arcstat_l2_misses;
255 kstat_named_t arcstat_l2_feeds;
256 kstat_named_t arcstat_l2_rw_clash;
257 kstat_named_t arcstat_l2_writes_sent;
258 kstat_named_t arcstat_l2_writes_done;
259 kstat_named_t arcstat_l2_writes_error;
260 kstat_named_t arcstat_l2_writes_hdr_miss;
261 kstat_named_t arcstat_l2_evict_lock_retry;
262 kstat_named_t arcstat_l2_evict_reading;
263 kstat_named_t arcstat_l2_free_on_write;
264 kstat_named_t arcstat_l2_abort_lowmem;
265 kstat_named_t arcstat_l2_cksum_bad;
266 kstat_named_t arcstat_l2_io_error;
267 kstat_named_t arcstat_l2_size;
268 kstat_named_t arcstat_l2_hdr_size;
269 kstat_named_t arcstat_memory_throttle_count;
270 } arc_stats_t;
271
272 static arc_stats_t arc_stats = {
273 { "hits", KSTAT_DATA_UINT64 },
274 { "misses", KSTAT_DATA_UINT64 },
275 { "demand_data_hits", KSTAT_DATA_UINT64 },
276 { "demand_data_misses", KSTAT_DATA_UINT64 },
277 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
278 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
279 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
280 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
281 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
282 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
283 { "mru_hits", KSTAT_DATA_UINT64 },
284 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
285 { "mfu_hits", KSTAT_DATA_UINT64 },
286 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
287 { "deleted", KSTAT_DATA_UINT64 },
288 { "recycle_miss", KSTAT_DATA_UINT64 },
289 { "mutex_miss", KSTAT_DATA_UINT64 },
290 { "evict_skip", KSTAT_DATA_UINT64 },
291 { "hash_elements", KSTAT_DATA_UINT64 },
292 { "hash_elements_max", KSTAT_DATA_UINT64 },
293 { "hash_collisions", KSTAT_DATA_UINT64 },
294 { "hash_chains", KSTAT_DATA_UINT64 },
295 { "hash_chain_max", KSTAT_DATA_UINT64 },
296 { "p", KSTAT_DATA_UINT64 },
297 { "c", KSTAT_DATA_UINT64 },
298 { "c_min", KSTAT_DATA_UINT64 },
299 { "c_max", KSTAT_DATA_UINT64 },
300 { "size", KSTAT_DATA_UINT64 },
301 { "hdr_size", KSTAT_DATA_UINT64 },
302 { "l2_hits", KSTAT_DATA_UINT64 },
303 { "l2_misses", KSTAT_DATA_UINT64 },
304 { "l2_feeds", KSTAT_DATA_UINT64 },
305 { "l2_rw_clash", KSTAT_DATA_UINT64 },
306 { "l2_writes_sent", KSTAT_DATA_UINT64 },
307 { "l2_writes_done", KSTAT_DATA_UINT64 },
308 { "l2_writes_error", KSTAT_DATA_UINT64 },
309 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
310 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
311 { "l2_evict_reading", KSTAT_DATA_UINT64 },
312 { "l2_free_on_write", KSTAT_DATA_UINT64 },
313 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
314 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
315 { "l2_io_error", KSTAT_DATA_UINT64 },
316 { "l2_size", KSTAT_DATA_UINT64 },
317 { "l2_hdr_size", KSTAT_DATA_UINT64 },
318 { "memory_throttle_count", KSTAT_DATA_UINT64 }
319 };
320
321 #define ARCSTAT(stat) (arc_stats.stat.value.ui64)
322
323 #define ARCSTAT_INCR(stat, val) \
324 atomic_add_64(&arc_stats.stat.value.ui64, (val));
325
326 #define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
327 #define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
328
329 #define ARCSTAT_MAX(stat, val) { \
330 uint64_t m; \
331 while ((val) > (m = arc_stats.stat.value.ui64) && \
332 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
333 continue; \
334 }
335
336 #define ARCSTAT_MAXSTAT(stat) \
337 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
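/*
 * Note on ARCSTAT_MAX above: the loop is a lock-free "store maximum".
 * It re-reads the current value m and retries the compare-and-swap
 * until either val is no longer greater than m or the CAS succeeds, so
 * concurrent updaters can never move the statistic backwards. A
 * typical pairing (see buf_hash_insert() below):
 *
 *	ARCSTAT_BUMP(arcstat_hash_elements);
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);
 */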
338
339 /*
340 * We define a macro to allow ARC hits/misses to be easily broken down by
341 * two separate conditions, giving a total of four different subtypes for
342 * each of hits and misses (so eight statistics total).
343 */
344 #define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
345 if (cond1) { \
346 if (cond2) { \
347 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
348 } else { \
349 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
350 } \
351 } else { \
352 if (cond2) { \
353 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
354 } else { \
355 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
356 } \
357 }
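/*
 * For example, the hit path in arc_buf_add_ref() below classifies each
 * hit by demand-vs-prefetch and data-vs-metadata in one statement:
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */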
358
359 kstat_t *arc_ksp;
360 static arc_state_t *arc_anon;
361 static arc_state_t *arc_mru;
362 static arc_state_t *arc_mru_ghost;
363 static arc_state_t *arc_mfu;
364 static arc_state_t *arc_mfu_ghost;
365 static arc_state_t *arc_l2c_only;
366
367 /*
368 * There are several ARC variables that are critical to export as kstats --
369 * but we don't want to have to grovel around in the kstat whenever we wish to
370 * manipulate them. For these variables, we therefore define them to be in
371 * terms of the statistic variable. This assures that we are not introducing
372 * the possibility of inconsistency by having shadow copies of the variables,
373 * while still allowing the code to be readable.
374 */
375 #define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
376 #define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
377 #define arc_c ARCSTAT(arcstat_c) /* target size of cache */
378 #define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
379 #define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
380
381 static int arc_no_grow; /* Don't try to grow cache size */
382 static uint64_t arc_tempreserve;
383 static uint64_t arc_meta_used;
384 static uint64_t arc_meta_limit;
385 static uint64_t arc_meta_max = 0;
386
387 typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
388
389 typedef struct arc_callback arc_callback_t;
390
391 struct arc_callback {
392 void *acb_private;
393 arc_done_func_t *acb_done;
394 arc_buf_t *acb_buf;
395 zio_t *acb_zio_dummy;
396 arc_callback_t *acb_next;
397 };
398
399 typedef struct arc_write_callback arc_write_callback_t;
400
401 struct arc_write_callback {
402 void *awcb_private;
403 arc_done_func_t *awcb_ready;
404 arc_done_func_t *awcb_done;
405 arc_buf_t *awcb_buf;
406 };
407
408 struct arc_buf_hdr {
409 /* protected by hash lock */
410 dva_t b_dva;
411 uint64_t b_birth;
412 uint64_t b_cksum0;
413
414 kmutex_t b_freeze_lock;
415 zio_cksum_t *b_freeze_cksum;
416
417 arc_buf_hdr_t *b_hash_next;
418 arc_buf_t *b_buf;
419 uint32_t b_flags;
420 uint32_t b_datacnt;
421
422 arc_callback_t *b_acb;
423 kcondvar_t b_cv;
424
425 /* immutable */
426 arc_buf_contents_t b_type;
427 uint64_t b_size;
428 spa_t *b_spa;
429
430 /* protected by arc state mutex */
431 arc_state_t *b_state;
432 list_node_t b_arc_node;
433
434 /* updated atomically */
435 clock_t b_arc_access;
436
437 /* self protecting */
438 refcount_t b_refcnt;
439
440 l2arc_buf_hdr_t *b_l2hdr;
441 list_node_t b_l2node;
442 };
443
444 static arc_buf_t *arc_eviction_list;
445 static kmutex_t arc_eviction_mtx;
446 static arc_buf_hdr_t arc_eviction_hdr;
447 static void arc_get_data_buf(arc_buf_t *buf);
448 static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
449 static int arc_evict_needed(arc_buf_contents_t type);
450 static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);
451
452 #define GHOST_STATE(state) \
453 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
454 (state) == arc_l2c_only)
455
456 /*
457 * Private ARC flags. These are ARC-internal flags that will show up
458 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
459 * be passed in as arc_flags in things like arc_read. However, these flags
460 * should never be passed and should only be set by ARC code. When adding new
461 * public flags, make sure not to smash the private ones.
462 */
463
464 #define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
465 #define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
466 #define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
467 #define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
468 #define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
469 #define ARC_INDIRECT (1 << 14) /* this is an indirect block */
470 #define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
471 #define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
472 #define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
473 #define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
474 #define ARC_STORED (1 << 19) /* has been store()d to */
475
476 #define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
477 #define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
478 #define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
479 #define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
480 #define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
481 #define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
482 #define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
483 #define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
484 (hdr)->b_l2hdr != NULL)
485 #define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
486 #define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
487 #define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
488
489 /*
490 * Other sizes
491 */
492
493 #define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
494 #define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
495
496 /*
497 * Hash table routines
498 */
499
500 #define HT_LOCK_PAD 64
501
502 struct ht_lock {
503 kmutex_t ht_lock;
504 #ifdef _KERNEL
505 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
506 #endif
507 };
508
509 #define BUF_LOCKS 256
510 typedef struct buf_hash_table {
511 uint64_t ht_mask;
512 arc_buf_hdr_t **ht_table;
513 struct ht_lock ht_locks[BUF_LOCKS];
514 } buf_hash_table_t;
515
516 static buf_hash_table_t buf_hash_table;
517
518 #define BUF_HASH_INDEX(spa, dva, birth) \
519 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
520 #define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
521 #define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
522 #define HDR_LOCK(buf) \
523 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))
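/*
 * Sketch of how the macros above compose (hypothetical fragment): a
 * header's identity (spa, dva, birth) hashes to a table index, and the
 * low bits of that index select one of the BUF_LOCKS pad-separated
 * locks:
 *
 *	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva,
 *	    hdr->b_birth);
 *	mutex_enter(BUF_HASH_LOCK(idx));
 *	... search or modify the hash chain at ht_table[idx] ...
 *	mutex_exit(BUF_HASH_LOCK(idx));
 *
 * HDR_LOCK(hdr) is shorthand for looking up the same mutex.
 */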
524
525 uint64_t zfs_crc64_table[256];
526
527 /*
528 * Level 2 ARC
529 */
530
531 #define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
532 #define L2ARC_HEADROOM 4 /* num of writes */
533 #define L2ARC_FEED_SECS 1 /* caching interval */
534
535 #define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
536 #define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
537
538 /*
539 * L2ARC Performance Tunables
540 */
541 uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
542 uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
543 uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
544 uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
545 boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
546
547 /*
548 * L2ARC Internals
549 */
550 typedef struct l2arc_dev {
551 vdev_t *l2ad_vdev; /* vdev */
552 spa_t *l2ad_spa; /* spa */
553 uint64_t l2ad_hand; /* next write location */
554 uint64_t l2ad_write; /* desired write size, bytes */
555 uint64_t l2ad_boost; /* warmup write boost, bytes */
556 uint64_t l2ad_start; /* first addr on device */
557 uint64_t l2ad_end; /* last addr on device */
558 uint64_t l2ad_evict; /* last addr eviction reached */
559 boolean_t l2ad_first; /* first sweep through */
560 list_t *l2ad_buflist; /* buffer list */
561 list_node_t l2ad_node; /* device list node */
562 } l2arc_dev_t;
563
564 static list_t L2ARC_dev_list; /* device list */
565 static list_t *l2arc_dev_list; /* device list pointer */
566 static kmutex_t l2arc_dev_mtx; /* device list mutex */
567 static l2arc_dev_t *l2arc_dev_last; /* last device used */
568 static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
569 static list_t L2ARC_free_on_write; /* free after write buf list */
570 static list_t *l2arc_free_on_write; /* free after write list ptr */
571 static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
572 static uint64_t l2arc_ndev; /* number of devices */
573
574 typedef struct l2arc_read_callback {
575 arc_buf_t *l2rcb_buf; /* read buffer */
576 spa_t *l2rcb_spa; /* spa */
577 blkptr_t l2rcb_bp; /* original blkptr */
578 zbookmark_t l2rcb_zb; /* original bookmark */
579 int l2rcb_flags; /* original flags */
580 } l2arc_read_callback_t;
581
582 typedef struct l2arc_write_callback {
583 l2arc_dev_t *l2wcb_dev; /* device info */
584 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
585 } l2arc_write_callback_t;
586
587 struct l2arc_buf_hdr {
588 /* protected by arc_buf_hdr mutex */
589 l2arc_dev_t *b_dev; /* L2ARC device */
590 daddr_t b_daddr; /* disk address, offset byte */
591 };
592
593 typedef struct l2arc_data_free {
594 /* protected by l2arc_free_on_write_mtx */
595 void *l2df_data;
596 size_t l2df_size;
597 void (*l2df_func)(void *, size_t);
598 list_node_t l2df_list_node;
599 } l2arc_data_free_t;
600
601 static kmutex_t l2arc_feed_thr_lock;
602 static kcondvar_t l2arc_feed_thr_cv;
603 static uint8_t l2arc_thread_exit;
604
605 static void l2arc_read_done(zio_t *zio);
606 static void l2arc_hdr_stat_add(void);
607 static void l2arc_hdr_stat_remove(void);
608
609 static uint64_t
610 buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
611 {
612 uintptr_t spav = (uintptr_t)spa;
613 uint8_t *vdva = (uint8_t *)dva;
614 uint64_t crc = -1ULL;
615 int i;
616
617 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
618
619 for (i = 0; i < sizeof (dva_t); i++)
620 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
621
622 crc ^= (spav>>8) ^ birth;
623
624 return (crc);
625 }
626
627 #define BUF_EMPTY(buf) \
628 ((buf)->b_dva.dva_word[0] == 0 && \
629 (buf)->b_dva.dva_word[1] == 0 && \
630 (buf)->b_birth == 0)
631
632 #define BUF_EQUAL(spa, dva, birth, buf) \
633 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
634 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
635 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
636
637 static arc_buf_hdr_t *
638 buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
639 {
640 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
641 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
642 arc_buf_hdr_t *buf;
643
644 mutex_enter(hash_lock);
645 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
646 buf = buf->b_hash_next) {
647 if (BUF_EQUAL(spa, dva, birth, buf)) {
648 *lockp = hash_lock;
649 return (buf);
650 }
651 }
652 mutex_exit(hash_lock);
653 *lockp = NULL;
654 return (NULL);
655 }
656
657 /*
658 * Insert an entry into the hash table. If there is already an element
659 * equal to elem in the hash table, then the already existing element
660 * will be returned and the new element will not be inserted.
661 * Otherwise returns NULL.
662 */
663 static arc_buf_hdr_t *
664 buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
665 {
666 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
667 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
668 arc_buf_hdr_t *fbuf;
669 uint32_t i;
670
671 ASSERT(!HDR_IN_HASH_TABLE(buf));
672 *lockp = hash_lock;
673 mutex_enter(hash_lock);
674 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
675 fbuf = fbuf->b_hash_next, i++) {
676 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
677 return (fbuf);
678 }
679
680 buf->b_hash_next = buf_hash_table.ht_table[idx];
681 buf_hash_table.ht_table[idx] = buf;
682 buf->b_flags |= ARC_IN_HASH_TABLE;
683
684 /* collect some hash table performance data */
685 if (i > 0) {
686 ARCSTAT_BUMP(arcstat_hash_collisions);
687 if (i == 1)
688 ARCSTAT_BUMP(arcstat_hash_chains);
689
690 ARCSTAT_MAX(arcstat_hash_chain_max, i);
691 }
692
693 ARCSTAT_BUMP(arcstat_hash_elements);
694 ARCSTAT_MAXSTAT(arcstat_hash_elements);
695
696 return (NULL);
697 }
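/*
 * Sketch of the insert-or-find idiom implied by the comment above
 * (hypothetical caller): buf_hash_insert() returns with *lockp held in
 * either case, so a racing insert of the same block is detected
 * without any additional locking:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... lost the race; operate on "exists", not "hdr" ...
 *	}
 *	mutex_exit(hash_lock);
 */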
698
699 static void
700 buf_hash_remove(arc_buf_hdr_t *buf)
701 {
702 arc_buf_hdr_t *fbuf, **bufp;
703 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
704
705 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
706 ASSERT(HDR_IN_HASH_TABLE(buf));
707
708 bufp = &buf_hash_table.ht_table[idx];
709 while ((fbuf = *bufp) != buf) {
710 ASSERT(fbuf != NULL);
711 bufp = &fbuf->b_hash_next;
712 }
713 *bufp = buf->b_hash_next;
714 buf->b_hash_next = NULL;
715 buf->b_flags &= ~ARC_IN_HASH_TABLE;
716
717 /* collect some hash table performance data */
718 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
719
720 if (buf_hash_table.ht_table[idx] &&
721 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
722 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
723 }
724
725 /*
726 * Global data structures and functions for the buf kmem cache.
727 */
728 static kmem_cache_t *hdr_cache;
729 static kmem_cache_t *buf_cache;
730
731 static void
732 buf_fini(void)
733 {
734 int i;
735
736 kmem_free(buf_hash_table.ht_table,
737 (buf_hash_table.ht_mask + 1) * sizeof (void *));
738 for (i = 0; i < BUF_LOCKS; i++)
739 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
740 kmem_cache_destroy(hdr_cache);
741 kmem_cache_destroy(buf_cache);
742 }
743
744 /*
745 * Constructor callback - called when the cache is empty
746 * and a new buf is requested.
747 */
748 /* ARGSUSED */
749 static int
750 hdr_cons(void *vbuf, void *unused, int kmflag)
751 {
752 arc_buf_hdr_t *buf = vbuf;
753
754 bzero(buf, sizeof (arc_buf_hdr_t));
755 refcount_create(&buf->b_refcnt);
756 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
757 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
758
759 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
760 return (0);
761 }
762
763 /* ARGSUSED */
764 static int
765 buf_cons(void *vbuf, void *unused, int kmflag)
766 {
767 arc_buf_t *buf = vbuf;
768
769 bzero(buf, sizeof (arc_buf_t));
770 rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
771 return (0);
772 }
773
774 /*
775 * Destructor callback - called when a cached buf is
776 * no longer required.
777 */
778 /* ARGSUSED */
779 static void
780 hdr_dest(void *vbuf, void *unused)
781 {
782 arc_buf_hdr_t *buf = vbuf;
783
784 refcount_destroy(&buf->b_refcnt);
785 cv_destroy(&buf->b_cv);
786 mutex_destroy(&buf->b_freeze_lock);
787
788 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
789 }
790
791 /* ARGSUSED */
792 static void
793 buf_dest(void *vbuf, void *unused)
794 {
795 arc_buf_t *buf = vbuf;
796
797 rw_destroy(&buf->b_lock);
798 }
799
800 /*
801 * Reclaim callback -- invoked when memory is low.
802 */
803 /* ARGSUSED */
804 static void
805 hdr_recl(void *unused)
806 {
807 dprintf("hdr_recl called\n");
808 /*
809 * umem calls the reclaim func when we destroy the buf cache,
810 * which is after we do arc_fini().
811 */
812 if (!arc_dead)
813 cv_signal(&arc_reclaim_thr_cv);
814 }
815
816 static void
817 buf_init(void)
818 {
819 uint64_t *ct;
820 uint64_t hsize = 1ULL << 12;
821 int i, j;
822
823 /*
824 * The hash table is big enough to fill all of physical memory
825 * with an average 64K block size. The table will take up
826 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
827 */
828 while (hsize * 65536 < physmem * PAGESIZE)
829 hsize <<= 1;
830 retry:
831 buf_hash_table.ht_mask = hsize - 1;
832 buf_hash_table.ht_table =
833 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
834 if (buf_hash_table.ht_table == NULL) {
835 ASSERT(hsize > (1ULL << 8));
836 hsize >>= 1;
837 goto retry;
838 }
839
840 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
841 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
842 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
843 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
844
845 for (i = 0; i < 256; i++)
846 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
847 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
848
849 for (i = 0; i < BUF_LOCKS; i++) {
850 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
851 NULL, MUTEX_DEFAULT, NULL);
852 }
853 }
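/*
 * Worked example of the sizing loop in buf_init() above: with 4GB of
 * physical memory, hsize doubles from 2^12 until hsize * 64K reaches
 * physmem, i.e. hsize = 4GB / 64K = 65536 buckets. At 8 bytes per
 * bucket pointer the table occupies 512KB, matching the comment's
 * estimate of 128KB per GB.
 */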
854
855 #define ARC_MINTIME (hz>>4) /* 62 ms */
856
857 static void
858 arc_cksum_verify(arc_buf_t *buf)
859 {
860 zio_cksum_t zc;
861
862 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
863 return;
864
865 mutex_enter(&buf->b_hdr->b_freeze_lock);
866 if (buf->b_hdr->b_freeze_cksum == NULL ||
867 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
868 mutex_exit(&buf->b_hdr->b_freeze_lock);
869 return;
870 }
871 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
872 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
873 panic("buffer modified while frozen!");
874 mutex_exit(&buf->b_hdr->b_freeze_lock);
875 }
876
877 static int
878 arc_cksum_equal(arc_buf_t *buf)
879 {
880 zio_cksum_t zc;
881 int equal;
882
883 mutex_enter(&buf->b_hdr->b_freeze_lock);
884 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
885 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
886 mutex_exit(&buf->b_hdr->b_freeze_lock);
887
888 return (equal);
889 }
890
891 static void
892 arc_cksum_compute(arc_buf_t *buf, boolean_t force)
893 {
894 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
895 return;
896
897 mutex_enter(&buf->b_hdr->b_freeze_lock);
898 if (buf->b_hdr->b_freeze_cksum != NULL) {
899 mutex_exit(&buf->b_hdr->b_freeze_lock);
900 return;
901 }
902 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
903 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
904 buf->b_hdr->b_freeze_cksum);
905 mutex_exit(&buf->b_hdr->b_freeze_lock);
906 }
907
908 void
909 arc_buf_thaw(arc_buf_t *buf)
910 {
911 if (zfs_flags & ZFS_DEBUG_MODIFY) {
912 if (buf->b_hdr->b_state != arc_anon)
913 panic("modifying non-anon buffer!");
914 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
915 panic("modifying buffer while i/o in progress!");
916 arc_cksum_verify(buf);
917 }
918
919 mutex_enter(&buf->b_hdr->b_freeze_lock);
920 if (buf->b_hdr->b_freeze_cksum != NULL) {
921 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
922 buf->b_hdr->b_freeze_cksum = NULL;
923 }
924 mutex_exit(&buf->b_hdr->b_freeze_lock);
925 }
926
927 void
928 arc_buf_freeze(arc_buf_t *buf)
929 {
930 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
931 return;
932
933 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
934 buf->b_hdr->b_state == arc_anon);
935 arc_cksum_compute(buf, B_FALSE);
936 }
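/*
 * Sketch of the ZFS_DEBUG_MODIFY lifecycle implemented above
 * (illustrative usage): a buffer is frozen once its contents should no
 * longer change and thawed before any legitimate modification;
 * arc_cksum_verify() panics if a frozen buffer's checksum no longer
 * matches:
 *
 *	arc_buf_freeze(buf);	records b_freeze_cksum
 *	...read-only access...	arc_cksum_verify() may run here
 *	arc_buf_thaw(buf);	discards the checksum before writing
 */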
937
938 static void
939 add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
940 {
941 ASSERT(MUTEX_HELD(hash_lock));
942
943 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
944 (ab->b_state != arc_anon)) {
945 uint64_t delta = ab->b_size * ab->b_datacnt;
946 list_t *list = &ab->b_state->arcs_list[ab->b_type];
947 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
948
949 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
950 mutex_enter(&ab->b_state->arcs_mtx);
951 ASSERT(list_link_active(&ab->b_arc_node));
952 list_remove(list, ab);
953 if (GHOST_STATE(ab->b_state)) {
954 ASSERT3U(ab->b_datacnt, ==, 0);
955 ASSERT3P(ab->b_buf, ==, NULL);
956 delta = ab->b_size;
957 }
958 ASSERT(delta > 0);
959 ASSERT3U(*size, >=, delta);
960 atomic_add_64(size, -delta);
961 mutex_exit(&ab->b_state->arcs_mtx);
962 /* remove the prefetch flag if we get a reference */
963 if (ab->b_flags & ARC_PREFETCH)
964 ab->b_flags &= ~ARC_PREFETCH;
965 }
966 }
967
968 static int
969 remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
970 {
971 int cnt;
972 arc_state_t *state = ab->b_state;
973
974 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
975 ASSERT(!GHOST_STATE(state));
976
977 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
978 (state != arc_anon)) {
979 uint64_t *size = &state->arcs_lsize[ab->b_type];
980
981 ASSERT(!MUTEX_HELD(&state->arcs_mtx));
982 mutex_enter(&state->arcs_mtx);
983 ASSERT(!list_link_active(&ab->b_arc_node));
984 list_insert_head(&state->arcs_list[ab->b_type], ab);
985 ASSERT(ab->b_datacnt > 0);
986 atomic_add_64(size, ab->b_size * ab->b_datacnt);
987 mutex_exit(&state->arcs_mtx);
988 }
989 return (cnt);
990 }
991
992 /*
993 * Move the supplied buffer to the indicated state. The mutex
994 * for the buffer must be held by the caller.
995 */
996 static void
997 arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
998 {
999 arc_state_t *old_state = ab->b_state;
1000 int64_t refcnt = refcount_count(&ab->b_refcnt);
1001 uint64_t from_delta, to_delta;
1002
1003 ASSERT(MUTEX_HELD(hash_lock));
1004 ASSERT(new_state != old_state);
1005 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1006 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1007
1008 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1009
1010 /*
1011 * If this buffer is evictable, transfer it from the
1012 * old state list to the new state list.
1013 */
1014 if (refcnt == 0) {
1015 if (old_state != arc_anon) {
1016 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
1017 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1018
1019 if (use_mutex)
1020 mutex_enter(&old_state->arcs_mtx);
1021
1022 ASSERT(list_link_active(&ab->b_arc_node));
1023 list_remove(&old_state->arcs_list[ab->b_type], ab);
1024
1025 /*
1026 * If prefetching out of the ghost cache,
1027 * we will have a non-null datacnt.
1028 */
1029 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1030 /* ghost elements have a ghost size */
1031 ASSERT(ab->b_buf == NULL);
1032 from_delta = ab->b_size;
1033 }
1034 ASSERT3U(*size, >=, from_delta);
1035 atomic_add_64(size, -from_delta);
1036
1037 if (use_mutex)
1038 mutex_exit(&old_state->arcs_mtx);
1039 }
1040 if (new_state != arc_anon) {
1041 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
1042 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1043
1044 if (use_mutex)
1045 mutex_enter(&new_state->arcs_mtx);
1046
1047 list_insert_head(&new_state->arcs_list[ab->b_type], ab);
1048
1049 /* ghost elements have a ghost size */
1050 if (GHOST_STATE(new_state)) {
1051 ASSERT(ab->b_datacnt == 0);
1052 ASSERT(ab->b_buf == NULL);
1053 to_delta = ab->b_size;
1054 }
1055 atomic_add_64(size, to_delta);
1056
1057 if (use_mutex)
1058 mutex_exit(&new_state->arcs_mtx);
1059 }
1060 }
1061
1062 ASSERT(!BUF_EMPTY(ab));
1063 if (new_state == arc_anon) {
1064 buf_hash_remove(ab);
1065 }
1066
1067 /* adjust state sizes */
1068 if (to_delta)
1069 atomic_add_64(&new_state->arcs_size, to_delta);
1070 if (from_delta) {
1071 ASSERT3U(old_state->arcs_size, >=, from_delta);
1072 atomic_add_64(&old_state->arcs_size, -from_delta);
1073 }
1074 ab->b_state = new_state;
1075
1076 /* adjust l2arc hdr stats */
1077 if (new_state == arc_l2c_only)
1078 l2arc_hdr_stat_add();
1079 else if (old_state == arc_l2c_only)
1080 l2arc_hdr_stat_remove();
1081 }
1082
1083 void
1084 arc_space_consume(uint64_t space)
1085 {
1086 atomic_add_64(&arc_meta_used, space);
1087 atomic_add_64(&arc_size, space);
1088 }
1089
1090 void
1091 arc_space_return(uint64_t space)
1092 {
1093 ASSERT(arc_meta_used >= space);
1094 if (arc_meta_max < arc_meta_used)
1095 arc_meta_max = arc_meta_used;
1096 atomic_add_64(&arc_meta_used, -space);
1097 ASSERT(arc_size >= space);
1098 atomic_add_64(&arc_size, -space);
1099 }
1100
1101 void *
1102 arc_data_buf_alloc(uint64_t size)
1103 {
1104 if (arc_evict_needed(ARC_BUFC_DATA))
1105 cv_signal(&arc_reclaim_thr_cv);
1106 atomic_add_64(&arc_size, size);
1107 return (zio_data_buf_alloc(size));
1108 }
1109
1110 void
1111 arc_data_buf_free(void *buf, uint64_t size)
1112 {
1113 zio_data_buf_free(buf, size);
1114 ASSERT(arc_size >= size);
1115 atomic_add_64(&arc_size, -size);
1116 }
1117
1118 arc_buf_t *
1119 arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1120 {
1121 arc_buf_hdr_t *hdr;
1122 arc_buf_t *buf;
1123
1124 ASSERT3U(size, >, 0);
1125 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1126 ASSERT(BUF_EMPTY(hdr));
1127 hdr->b_size = size;
1128 hdr->b_type = type;
1129 hdr->b_spa = spa;
1130 hdr->b_state = arc_anon;
1131 hdr->b_arc_access = 0;
1132 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1133 buf->b_hdr = hdr;
1134 buf->b_data = NULL;
1135 buf->b_efunc = NULL;
1136 buf->b_private = NULL;
1137 buf->b_next = NULL;
1138 hdr->b_buf = buf;
1139 arc_get_data_buf(buf);
1140 hdr->b_datacnt = 1;
1141 hdr->b_flags = 0;
1142 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1143 (void) refcount_add(&hdr->b_refcnt, tag);
1144
1145 return (buf);
1146 }
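/*
 * Minimal usage sketch for the allocation interface above
 * (hypothetical caller): the tag passed to arc_buf_alloc() owns the
 * initial reference, and the same tag must be used to drop it:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, FTAG, ARC_BUFC_DATA);
 *	bcopy(src, buf->b_data, size);
 *	...
 *	arc_buf_free(buf, FTAG);
 */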
1147
1148 static arc_buf_t *
1149 arc_buf_clone(arc_buf_t *from)
1150 {
1151 arc_buf_t *buf;
1152 arc_buf_hdr_t *hdr = from->b_hdr;
1153 uint64_t size = hdr->b_size;
1154
1155 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1156 buf->b_hdr = hdr;
1157 buf->b_data = NULL;
1158 buf->b_efunc = NULL;
1159 buf->b_private = NULL;
1160 buf->b_next = hdr->b_buf;
1161 hdr->b_buf = buf;
1162 arc_get_data_buf(buf);
1163 bcopy(from->b_data, buf->b_data, size);
1164 hdr->b_datacnt += 1;
1165 return (buf);
1166 }
1167
1168 void
1169 arc_buf_add_ref(arc_buf_t *buf, void* tag)
1170 {
1171 arc_buf_hdr_t *hdr;
1172 kmutex_t *hash_lock;
1173
1174 /*
1175 * Check to see if this buffer is evicted. Callers
1176 * must verify b_data != NULL to know if the add_ref
1177 * was successful.
1178 */
1179 rw_enter(&buf->b_lock, RW_READER);
1180 if (buf->b_data == NULL) {
1181 rw_exit(&buf->b_lock);
1182 return;
1183 }
1184 hdr = buf->b_hdr;
1185 ASSERT(hdr != NULL);
1186 hash_lock = HDR_LOCK(hdr);
1187 mutex_enter(hash_lock);
1188 rw_exit(&buf->b_lock);
1189
1190 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1191 add_reference(hdr, hash_lock, tag);
1192 arc_access(hdr, hash_lock);
1193 mutex_exit(hash_lock);
1194 ARCSTAT_BUMP(arcstat_hits);
1195 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1196 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1197 data, metadata, hits);
1198 }
1199
1200 /*
1201 * Free the arc data buffer. If it is an l2arc write in progress,
1202 * the buffer is placed on l2arc_free_on_write to be freed later.
1203 */
1204 static void
1205 arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
1206 void *data, size_t size)
1207 {
1208 if (HDR_L2_WRITING(hdr)) {
1209 l2arc_data_free_t *df;
1210 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1211 df->l2df_data = data;
1212 df->l2df_size = size;
1213 df->l2df_func = free_func;
1214 mutex_enter(&l2arc_free_on_write_mtx);
1215 list_insert_head(l2arc_free_on_write, df);
1216 mutex_exit(&l2arc_free_on_write_mtx);
1217 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1218 } else {
1219 free_func(data, size);
1220 }
1221 }
1222
1223 static void
1224 arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1225 {
1226 arc_buf_t **bufp;
1227
1228 /* free up data associated with the buf */
1229 if (buf->b_data) {
1230 arc_state_t *state = buf->b_hdr->b_state;
1231 uint64_t size = buf->b_hdr->b_size;
1232 arc_buf_contents_t type = buf->b_hdr->b_type;
1233
1234 arc_cksum_verify(buf);
1235 if (!recycle) {
1236 if (type == ARC_BUFC_METADATA) {
1237 arc_buf_data_free(buf->b_hdr, zio_buf_free,
1238 buf->b_data, size);
1239 arc_space_return(size);
1240 } else {
1241 ASSERT(type == ARC_BUFC_DATA);
1242 arc_buf_data_free(buf->b_hdr,
1243 zio_data_buf_free, buf->b_data, size);
1244 atomic_add_64(&arc_size, -size);
1245 }
1246 }
1247 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1248 uint64_t *cnt = &state->arcs_lsize[type];
1249
1250 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1251 ASSERT(state != arc_anon);
1252
1253 ASSERT3U(*cnt, >=, size);
1254 atomic_add_64(cnt, -size);
1255 }
1256 ASSERT3U(state->arcs_size, >=, size);
1257 atomic_add_64(&state->arcs_size, -size);
1258 buf->b_data = NULL;
1259 ASSERT(buf->b_hdr->b_datacnt > 0);
1260 buf->b_hdr->b_datacnt -= 1;
1261 }
1262
1263 /* only remove the buf if requested */
1264 if (!all)
1265 return;
1266
1267 /* remove the buf from the hdr list */
1268 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1269 continue;
1270 *bufp = buf->b_next;
1271
1272 ASSERT(buf->b_efunc == NULL);
1273
1274 /* clean up the buf */
1275 buf->b_hdr = NULL;
1276 kmem_cache_free(buf_cache, buf);
1277 }
1278
1279 static void
1280 arc_hdr_destroy(arc_buf_hdr_t *hdr)
1281 {
1282 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1283 ASSERT3P(hdr->b_state, ==, arc_anon);
1284 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1285 ASSERT(!(hdr->b_flags & ARC_STORED));
1286
1287 if (hdr->b_l2hdr != NULL) {
1288 if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
1289 /*
1290 * To prevent arc_free() and l2arc_evict() from
1291 * attempting to free the same buffer at the same time,
1292 * a FREE_IN_PROGRESS flag is given to arc_free() to
1293 * give it priority. l2arc_evict() can't destroy this
1294 * header while we are waiting on l2arc_buflist_mtx.
1295 *
1296 * The hdr may be removed from l2ad_buflist before we
1297 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1298 */
1299 mutex_enter(&l2arc_buflist_mtx);
1300 if (hdr->b_l2hdr != NULL) {
1301 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
1302 hdr);
1303 }
1304 mutex_exit(&l2arc_buflist_mtx);
1305 } else {
1306 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
1307 }
1308 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1309 kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
1310 if (hdr->b_state == arc_l2c_only)
1311 l2arc_hdr_stat_remove();
1312 hdr->b_l2hdr = NULL;
1313 }
1314
1315 if (!BUF_EMPTY(hdr)) {
1316 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1317 bzero(&hdr->b_dva, sizeof (dva_t));
1318 hdr->b_birth = 0;
1319 hdr->b_cksum0 = 0;
1320 }
1321 while (hdr->b_buf) {
1322 arc_buf_t *buf = hdr->b_buf;
1323
1324 if (buf->b_efunc) {
1325 mutex_enter(&arc_eviction_mtx);
1326 rw_enter(&buf->b_lock, RW_WRITER);
1327 ASSERT(buf->b_hdr != NULL);
1328 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1329 hdr->b_buf = buf->b_next;
1330 buf->b_hdr = &arc_eviction_hdr;
1331 buf->b_next = arc_eviction_list;
1332 arc_eviction_list = buf;
1333 rw_exit(&buf->b_lock);
1334 mutex_exit(&arc_eviction_mtx);
1335 } else {
1336 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1337 }
1338 }
1339 if (hdr->b_freeze_cksum != NULL) {
1340 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1341 hdr->b_freeze_cksum = NULL;
1342 }
1343
1344 ASSERT(!list_link_active(&hdr->b_arc_node));
1345 ASSERT3P(hdr->b_hash_next, ==, NULL);
1346 ASSERT3P(hdr->b_acb, ==, NULL);
1347 kmem_cache_free(hdr_cache, hdr);
1348 }
1349
1350 void
1351 arc_buf_free(arc_buf_t *buf, void *tag)
1352 {
1353 arc_buf_hdr_t *hdr = buf->b_hdr;
1354 int hashed = hdr->b_state != arc_anon;
1355
1356 ASSERT(buf->b_efunc == NULL);
1357 ASSERT(buf->b_data != NULL);
1358
1359 if (hashed) {
1360 kmutex_t *hash_lock = HDR_LOCK(hdr);
1361
1362 mutex_enter(hash_lock);
1363 (void) remove_reference(hdr, hash_lock, tag);
1364 if (hdr->b_datacnt > 1)
1365 arc_buf_destroy(buf, FALSE, TRUE);
1366 else
1367 hdr->b_flags |= ARC_BUF_AVAILABLE;
1368 mutex_exit(hash_lock);
1369 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1370 int destroy_hdr;
1371 /*
1372 * We are in the middle of an async write. Don't destroy
1373 * this buffer unless the write completes before we finish
1374 * decrementing the reference count.
1375 */
1376 mutex_enter(&arc_eviction_mtx);
1377 (void) remove_reference(hdr, NULL, tag);
1378 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1379 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1380 mutex_exit(&arc_eviction_mtx);
1381 if (destroy_hdr)
1382 arc_hdr_destroy(hdr);
1383 } else {
1384 if (remove_reference(hdr, NULL, tag) > 0) {
1385 ASSERT(HDR_IO_ERROR(hdr));
1386 arc_buf_destroy(buf, FALSE, TRUE);
1387 } else {
1388 arc_hdr_destroy(hdr);
1389 }
1390 }
1391 }
1392
1393 int
1394 arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1395 {
1396 arc_buf_hdr_t *hdr = buf->b_hdr;
1397 kmutex_t *hash_lock = HDR_LOCK(hdr);
1398 int no_callback = (buf->b_efunc == NULL);
1399
1400 if (hdr->b_state == arc_anon) {
1401 arc_buf_free(buf, tag);
1402 return (no_callback);
1403 }
1404
1405 mutex_enter(hash_lock);
1406 ASSERT(hdr->b_state != arc_anon);
1407 ASSERT(buf->b_data != NULL);
1408
1409 (void) remove_reference(hdr, hash_lock, tag);
1410 if (hdr->b_datacnt > 1) {
1411 if (no_callback)
1412 arc_buf_destroy(buf, FALSE, TRUE);
1413 } else if (no_callback) {
1414 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1415 hdr->b_flags |= ARC_BUF_AVAILABLE;
1416 }
1417 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1418 refcount_is_zero(&hdr->b_refcnt));
1419 mutex_exit(hash_lock);
1420 return (no_callback);
1421 }
1422
1423 int
1424 arc_buf_size(arc_buf_t *buf)
1425 {
1426 return (buf->b_hdr->b_size);
1427 }
1428
1429 /*
1430 * Evict buffers from list until we've removed the specified number of
1431 * bytes. Move the removed buffers to the appropriate evict state.
1432 * If the recycle flag is set, then attempt to "recycle" a buffer:
1433 * - look for a buffer to evict that is `bytes' long.
1434 * - return the data block from this buffer rather than freeing it.
1435 * This flag is used by callers that are trying to make space for a
1436 * new buffer in a full arc cache.
1437 *
1438 * This function makes a "best effort". It skips over any buffers
1439 * it can't get a hash_lock on, and so may not catch all candidates.
1440 * It may also return without evicting as much space as requested.
1441 */
1442 static void *
1443 arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
1444 arc_buf_contents_t type)
1445 {
1446 arc_state_t *evicted_state;
1447 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1448 arc_buf_hdr_t *ab, *ab_prev = NULL;
1449 list_t *list = &state->arcs_list[type];
1450 kmutex_t *hash_lock;
1451 boolean_t have_lock;
1452 void *stolen = NULL;
1453
1454 ASSERT(state == arc_mru || state == arc_mfu);
1455
1456 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1457
1458 mutex_enter(&state->arcs_mtx);
1459 mutex_enter(&evicted_state->arcs_mtx);
1460
1461 for (ab = list_tail(list); ab; ab = ab_prev) {
1462 ab_prev = list_prev(list, ab);
1463 /* prefetch buffers have a minimum lifespan */
1464 if (HDR_IO_IN_PROGRESS(ab) ||
1465 (spa && ab->b_spa != spa) ||
1466 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1467 lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
1468 skipped++;
1469 continue;
1470 }
1471 /* "lookahead" for better eviction candidate */
1472 if (recycle && ab->b_size != bytes &&
1473 ab_prev && ab_prev->b_size == bytes)
1474 continue;
1475 hash_lock = HDR_LOCK(ab);
1476 have_lock = MUTEX_HELD(hash_lock);
1477 if (have_lock || mutex_tryenter(hash_lock)) {
1478 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
1479 ASSERT(ab->b_datacnt > 0);
1480 while (ab->b_buf) {
1481 arc_buf_t *buf = ab->b_buf;
1482 if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
1483 missed += 1;
1484 break;
1485 }
1486 if (buf->b_data) {
1487 bytes_evicted += ab->b_size;
1488 if (recycle && ab->b_type == type &&
1489 ab->b_size == bytes &&
1490 !HDR_L2_WRITING(ab)) {
1491 stolen = buf->b_data;
1492 recycle = FALSE;
1493 }
1494 }
1495 if (buf->b_efunc) {
1496 mutex_enter(&arc_eviction_mtx);
1497 arc_buf_destroy(buf,
1498 buf->b_data == stolen, FALSE);
1499 ab->b_buf = buf->b_next;
1500 buf->b_hdr = &arc_eviction_hdr;
1501 buf->b_next = arc_eviction_list;
1502 arc_eviction_list = buf;
1503 mutex_exit(&arc_eviction_mtx);
1504 rw_exit(&buf->b_lock);
1505 } else {
1506 rw_exit(&buf->b_lock);
1507 arc_buf_destroy(buf,
1508 buf->b_data == stolen, TRUE);
1509 }
1510 }
1511 if (ab->b_datacnt == 0) {
1512 arc_change_state(evicted_state, ab, hash_lock);
1513 ASSERT(HDR_IN_HASH_TABLE(ab));
1514 ab->b_flags |= ARC_IN_HASH_TABLE;
1515 ab->b_flags &= ~ARC_BUF_AVAILABLE;
1516 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
1517 }
1518 if (!have_lock)
1519 mutex_exit(hash_lock);
1520 if (bytes >= 0 && bytes_evicted >= bytes)
1521 break;
1522 } else {
1523 missed += 1;
1524 }
1525 }
1526
1527 mutex_exit(&evicted_state->arcs_mtx);
1528 mutex_exit(&state->arcs_mtx);
1529
1530 if (bytes_evicted < bytes)
1531 dprintf("only evicted %lld bytes from %x",
1532 (longlong_t)bytes_evicted, state);
1533
1534 if (skipped)
1535 ARCSTAT_INCR(arcstat_evict_skip, skipped);
1536
1537 if (missed)
1538 ARCSTAT_INCR(arcstat_mutex_miss, missed);
1539
1540 /*
1541 * We have just evicted some data into the ghost state, so make
1542 * sure we also adjust the ghost state size if necessary.
1543 */
1544 if (arc_no_grow &&
1545 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
1546 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
1547 arc_mru_ghost->arcs_size - arc_c;
1548
1549 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
1550 int64_t todelete =
1551 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
1552 arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1553 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
1554 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
1555 arc_mru_ghost->arcs_size +
1556 arc_mfu_ghost->arcs_size - arc_c);
1557 arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1558 }
1559 }
1560
1561 return (stolen);
1562 }
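/*
 * Usage note for arc_evict() above (illustrative, not a verbatim
 * caller): when a new buffer of known size is needed and the cache is
 * full, arc_get_data_buf() can pass recycle == TRUE so that the data
 * block of a victim of exactly `bytes' is stolen and reused, falling
 * back to a fresh allocation only when nothing suitable was found:
 *
 *	void *data = arc_evict(arc_mfu, NULL, size, TRUE, type);
 *	if (data == NULL)
 *		data = zio_buf_alloc(size);
 */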
1563
1564 /*
1565 * Remove buffers from list until we've removed the specified number of
1566 * bytes. Destroy the buffers that are removed.
1567 */
1568 static void
1569 arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes)
1570 {
1571 arc_buf_hdr_t *ab, *ab_prev;
1572 list_t *list = &state->arcs_list[ARC_BUFC_DATA];
1573 kmutex_t *hash_lock;
1574 uint64_t bytes_deleted = 0;
1575 uint64_t bufs_skipped = 0;
1576
1577 ASSERT(GHOST_STATE(state));
1578 top:
1579 mutex_enter(&state->arcs_mtx);
1580 for (ab = list_tail(list); ab; ab = ab_prev) {
1581 ab_prev = list_prev(list, ab);
1582 if (spa && ab->b_spa != spa)
1583 continue;
1584 hash_lock = HDR_LOCK(ab);
1585 if (mutex_tryenter(hash_lock)) {
1586 ASSERT(!HDR_IO_IN_PROGRESS(ab));
1587 ASSERT(ab->b_buf == NULL);
1588 ARCSTAT_BUMP(arcstat_deleted);
1589 bytes_deleted += ab->b_size;
1590
1591 if (ab->b_l2hdr != NULL) {
1592 /*
1593 * This buffer is cached on the 2nd Level ARC;
1594 * don't destroy the header.
1595 */
1596 arc_change_state(arc_l2c_only, ab, hash_lock);
1597 mutex_exit(hash_lock);
1598 } else {
1599 arc_change_state(arc_anon, ab, hash_lock);
1600 mutex_exit(hash_lock);
1601 arc_hdr_destroy(ab);
1602 }
1603
1604 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
1605 if (bytes >= 0 && bytes_deleted >= bytes)
1606 break;
1607 } else {
1608 if (bytes < 0) {
1609 mutex_exit(&state->arcs_mtx);
1610 mutex_enter(hash_lock);
1611 mutex_exit(hash_lock);
1612 goto top;
1613 }
1614 bufs_skipped += 1;
1615 }
1616 }
1617 mutex_exit(&state->arcs_mtx);
1618
1619 if (list == &state->arcs_list[ARC_BUFC_DATA] &&
1620 (bytes < 0 || bytes_deleted < bytes)) {
1621 list = &state->arcs_list[ARC_BUFC_METADATA];
1622 goto top;
1623 }
1624
1625 if (bufs_skipped) {
1626 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
1627 ASSERT(bytes >= 0);
1628 }
1629
1630 if (bytes_deleted < bytes)
1631 dprintf("only deleted %lld bytes from %p",
1632 (longlong_t)bytes_deleted, state);
1633 }
1634
1635 static void
1636 arc_adjust(void)
1637 {
1638 int64_t top_sz, mru_over, arc_over, todelete;
1639
1640 top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used;
1641
1642 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
1643 int64_t toevict =
1644 MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
1645 (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA);
1646 top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1647 }
1648
1649 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1650 int64_t toevict =
1651 MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
1652 (void) arc_evict(arc_mru, NULL, toevict, FALSE,
1653 ARC_BUFC_METADATA);
1654 top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
1655 }
1656
1657 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;
1658
1659 if (mru_over > 0) {
1660 if (arc_mru_ghost->arcs_size > 0) {
1661 todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
1662 arc_evict_ghost(arc_mru_ghost, NULL, todelete);
1663 }
1664 }
1665
1666 if ((arc_over = arc_size - arc_c) > 0) {
1667 int64_t tbl_over;
1668
1669 if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
1670 int64_t toevict =
1671 MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
1672 (void) arc_evict(arc_mfu, NULL, toevict, FALSE,
1673 ARC_BUFC_DATA);
1674 arc_over = arc_size - arc_c;
1675 }
1676
1677 if (arc_over > 0 &&
1678 arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
1679 int64_t toevict =
1680 MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
1681 arc_over);
1682 (void) arc_evict(arc_mfu, NULL, toevict, FALSE,
1683 ARC_BUFC_METADATA);
1684 }
1685
1686 tbl_over = arc_size + arc_mru_ghost->arcs_size +
1687 arc_mfu_ghost->arcs_size - arc_c * 2;
1688
1689 if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
1690 todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
1691 arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
1692 }
1693 }
1694 }
1695
1696 static void
1697 arc_do_user_evicts(void)
1698 {
1699 mutex_enter(&arc_eviction_mtx);
1700 while (arc_eviction_list != NULL) {
1701 arc_buf_t *buf = arc_eviction_list;
1702 arc_eviction_list = buf->b_next;
1703 rw_enter(&buf->b_lock, RW_WRITER);
1704 buf->b_hdr = NULL;
1705 rw_exit(&buf->b_lock);
1706 mutex_exit(&arc_eviction_mtx);
1707
1708 if (buf->b_efunc != NULL)
1709 VERIFY(buf->b_efunc(buf) == 0);
1710
1711 buf->b_efunc = NULL;
1712 buf->b_private = NULL;
1713 kmem_cache_free(buf_cache, buf);
1714 mutex_enter(&arc_eviction_mtx);
1715 }
1716 mutex_exit(&arc_eviction_mtx);
1717 }
1718
1719 /*
1720 * Flush all *evictable* data from the cache for the given spa.
1721 * NOTE: this will not touch "active" (i.e. referenced) data.
1722 */
1723 void
1724 arc_flush(spa_t *spa)
1725 {
1726 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
1727 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA);
1728 if (spa)
1729 break;
1730 }
1731 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
1732 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA);
1733 if (spa)
1734 break;
1735 }
1736 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
1737 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA);
1738 if (spa)
1739 break;
1740 }
1741 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
1742 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA);
1743 if (spa)
1744 break;
1745 }
1746
1747 arc_evict_ghost(arc_mru_ghost, spa, -1);
1748 arc_evict_ghost(arc_mfu_ghost, spa, -1);
1749
1750 mutex_enter(&arc_reclaim_thr_lock);
1751 arc_do_user_evicts();
1752 mutex_exit(&arc_reclaim_thr_lock);
1753 ASSERT(spa || arc_eviction_list == NULL);
1754 }
1755
1756 int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */
1757
1758 void
1759 arc_shrink(void)
1760 {
1761 if (arc_c > arc_c_min) {
1762 uint64_t to_free;
1763
1764 #ifdef _KERNEL
1765 to_free = MAX(arc_c >> arc_shrink_shift, ptob(needfree));
1766 #else
1767 to_free = arc_c >> arc_shrink_shift;
1768 #endif
1769 if (arc_c > arc_c_min + to_free)
1770 atomic_add_64(&arc_c, -to_free);
1771 else
1772 arc_c = arc_c_min;
1773
1774 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
1775 if (arc_c > arc_size)
1776 arc_c = MAX(arc_size, arc_c_min);
1777 if (arc_p > arc_c)
1778 arc_p = (arc_c >> 1);
1779 ASSERT(arc_c >= arc_c_min);
1780 ASSERT((int64_t)arc_p >= 0);
1781 }
1782
1783 if (arc_size > arc_c)
1784 arc_adjust();
1785 }
1786
1787 static int
1788 arc_reclaim_needed(void)
1789 {
1790 uint64_t extra;
1791
1792 #ifdef _KERNEL
1793
1794 if (needfree)
1795 return (1);
1796
1797 /*
1798 * take 'desfree' extra pages, so we reclaim sooner rather than later
1799 */
1800 extra = desfree;
1801
1802 /*
1803 * check that we're out of range of the pageout scanner. It starts to
1804 * schedule paging if freemem is less than lotsfree + needfree.
1805 * lotsfree is the high-water mark for pageout, and needfree is the
1806 * number of needed free pages. We add extra pages here to make sure
1807 * the scanner doesn't start up while we're freeing memory.
1808 */
1809 if (freemem < lotsfree + needfree + extra)
1810 return (1);
1811
1812 /*
1813 * check to make sure that swapfs has enough space so that anon
1814 * reservations can still succeed. anon_resvmem() checks that the
1815 * availrmem is greater than swapfs_minfree plus the number of
1816 * reserved swap pages. We also add a bit of extra here just to prevent
1817 * circumstances from getting really dire.
1818 */
1819 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1820 return (1);
1821
1822 #if defined(__i386)
1823 /*
1824 * If we're on an i386 platform, it's possible that we'll exhaust the
1825 * kernel heap space before we ever run out of available physical
1826 * memory. Most checks of the size of the heap_area compare against
1827 * tune.t_minarmem, which is the minimum available real memory that we
1828 * can have in the system. However, this is generally fixed at 25 pages
1829 * which is so low that it's useless. In this comparison, we seek to
1830 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1831 * heap is allocated. (Or, in the calculation, if less than 1/4th is
1832 * free)
1833 */
1834 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1835 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1836 return (1);
1837 #endif
1838
1839 #else
1840 if (spa_get_random(100) == 0)
1841 return (1);
1842 #endif
1843 return (0);
1844 }
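/*
 * Summary of the kernel-side triggers above (informational): reclaim
 * is requested when pageout needs pages (needfree), when freemem
 * falls within 'desfree' pages of the pageout threshold, when swapfs
 * reservations are nearly exhausted, or (on i386) when more than
 * 3/4 of the kernel heap is allocated.
 */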
1845
1846 static void
1847 arc_kmem_reap_now(arc_reclaim_strategy_t strat)
1848 {
1849 size_t i;
1850 kmem_cache_t *prev_cache = NULL;
1851 kmem_cache_t *prev_data_cache = NULL;
1852 extern kmem_cache_t *zio_buf_cache[];
1853 extern kmem_cache_t *zio_data_buf_cache[];
1854
1855 #ifdef _KERNEL
1856 if (arc_meta_used >= arc_meta_limit) {
1857 /*
1858 * We are exceeding our meta-data cache limit.
1859 * Purge some DNLC entries to release holds on meta-data.
1860 */
1861 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1862 }
1863 #if defined(__i386)
1864 /*
1865 * Reclaim unused memory from all kmem caches.
1866 */
1867 kmem_reap();
1868 #endif
1869 #endif
1870
1871 /*
1872 * An aggressive reclamation will shrink the cache size as well as
1873 * reap free buffers from the arc kmem caches.
1874 */
1875 if (strat == ARC_RECLAIM_AGGR)
1876 arc_shrink();
1877
1878 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
1879 if (zio_buf_cache[i] != prev_cache) {
1880 prev_cache = zio_buf_cache[i];
1881 kmem_cache_reap_now(zio_buf_cache[i]);
1882 }
1883 if (zio_data_buf_cache[i] != prev_data_cache) {
1884 prev_data_cache = zio_data_buf_cache[i];
1885 kmem_cache_reap_now(zio_data_buf_cache[i]);
1886 }
1887 }
1888 kmem_cache_reap_now(buf_cache);
1889 kmem_cache_reap_now(hdr_cache);
1890 }
1891
1892 static void
1893 arc_reclaim_thread(void)
1894 {
1895 clock_t growtime = 0;
1896 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
1897 callb_cpr_t cpr;
1898
1899 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
1900
1901 mutex_enter(&arc_reclaim_thr_lock);
1902 while (arc_thread_exit == 0) {
1903 if (arc_reclaim_needed()) {
1904
1905 if (arc_no_grow) {
1906 if (last_reclaim == ARC_RECLAIM_CONS) {
1907 last_reclaim = ARC_RECLAIM_AGGR;
1908 } else {
1909 last_reclaim = ARC_RECLAIM_CONS;
1910 }
1911 } else {
1912 arc_no_grow = TRUE;
1913 last_reclaim = ARC_RECLAIM_AGGR;
1914 membar_producer();
1915 }
1916
1917 /* reset the growth delay for every reclaim */
1918 growtime = lbolt + (arc_grow_retry * hz);
1919
1920 arc_kmem_reap_now(last_reclaim);
1921 arc_warm = B_TRUE;
1922
1923 } else if (arc_no_grow && lbolt >= growtime) {
1924 arc_no_grow = FALSE;
1925 }
1926
1927 if (2 * arc_c < arc_size +
1928 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)
1929 arc_adjust();
1930
1931 if (arc_eviction_list != NULL)
1932 arc_do_user_evicts();
1933
1934 /* block until needed, or one second, whichever is shorter */
1935 CALLB_CPR_SAFE_BEGIN(&cpr);
1936 (void) cv_timedwait(&arc_reclaim_thr_cv,
1937 &arc_reclaim_thr_lock, (lbolt + hz));
1938 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1939 }
1940
1941 arc_thread_exit = 0;
1942 cv_broadcast(&arc_reclaim_thr_cv);
1943 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
1944 thread_exit();
1945 }
1946
1947 /*
1948 * Adapt arc info given the number of bytes we are trying to add and
1949 * the state that we are coming from. This function is only called
1950 * when we are adding new content to the cache.
1951 */
1952 static void
1953 arc_adapt(int bytes, arc_state_t *state)
1954 {
1955 int mult;
1956
1957 if (state == arc_l2c_only)
1958 return;
1959
1960 ASSERT(bytes > 0);
1961 /*
1962 * Adapt the target size of the MRU list:
1963 * - if we just hit in the MRU ghost list, then increase
1964 * the target size of the MRU list.
1965 * - if we just hit in the MFU ghost list, then increase
1966 * the target size of the MFU list by decreasing the
1967 * target size of the MRU list.
1968 */
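/*
 * Worked example (hypothetical sizes): with 1GB on the MRU ghost
 * list and 4GB on the MFU ghost list, an 8K hit in the MRU ghost
 * list grows arc_p by 8K * (4GB / 1GB) = 32K, while an 8K hit in
 * the MFU ghost list shrinks arc_p by only 8K * 1 = 8K. Hits in
 * the smaller ghost list are amplified accordingly.
 */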
1969 if (state == arc_mru_ghost) {
1970 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
1971 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
1972
1973 arc_p = MIN(arc_c, arc_p + bytes * mult);
1974 } else if (state == arc_mfu_ghost) {
1975 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1976 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
1977
1978 arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
1979 }
1980 ASSERT((int64_t)arc_p >= 0);
1981
1982 if (arc_reclaim_needed()) {
1983 cv_signal(&arc_reclaim_thr_cv);
1984 return;
1985 }
1986
1987 if (arc_no_grow)
1988 return;
1989
1990 if (arc_c >= arc_c_max)
1991 return;
1992
1993 /*
1994 * If we're within (2 * maxblocksize) bytes of the target
1995 * cache size, increment the target cache size
1996 */
1997 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
1998 atomic_add_64(&arc_c, (int64_t)bytes);
1999 if (arc_c > arc_c_max)
2000 arc_c = arc_c_max;
2001 else if (state == arc_anon)
2002 atomic_add_64(&arc_p, (int64_t)bytes);
2003 if (arc_p > arc_c)
2004 arc_p = arc_c;
2005 }
2006 ASSERT((int64_t)arc_p >= 0);
2007 }
2008
2009 /*
2010 * Check if the cache has reached its limits and eviction is required
2011 * prior to insert.
2012 */
2013 static int
2014 arc_evict_needed(arc_buf_contents_t type)
2015 {
2016 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2017 return (1);
2018
2019 #ifdef _KERNEL
2020 /*
2021 * If zio data pages are being allocated out of a separate heap segment,
2022 * then enforce that roughly 1/32 of that arena's virtual memory
2023 * remains free.
2024 */
2025 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2026 vmem_size(zio_arena, VMEM_FREE) <
2027 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2028 return (1);
2029 #endif
2030
2031 if (arc_reclaim_needed())
2032 return (1);
2033
2034 return (arc_size > arc_c);
2035 }
2036
2037 /*
2038 * The buffer, supplied as the first argument, needs a data block.
2039 * So, if we are at cache max, determine which cache should be victimized.
2040 * We have the following cases:
2041 *
2042 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2043 * In this situation if we're out of space, but the resident size of the MFU is
2044 * under the limit, victimize the MFU cache to satisfy this insertion request.
2045 *
2046 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2047 * Here, we've used up all of the available space for the MRU, so we need to
2048 * evict from our own cache instead. Evict from the set of resident MRU
2049 * entries.
2050 *
2051 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2052 * c minus p represents the MFU space in the cache, since p is the size of the
2053 * cache that is dedicated to the MRU. In this situation there's still space on
2054 * the MFU side, so the MRU side needs to be victimized.
2055 *
2056 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2057 * MFU's resident set is consuming more space than it has been allotted. In
2058 * this situation, we must victimize our own cache, the MFU, for this insertion.
2059 */
2060 static void
2061 arc_get_data_buf(arc_buf_t *buf)
2062 {
2063 arc_state_t *state = buf->b_hdr->b_state;
2064 uint64_t size = buf->b_hdr->b_size;
2065 arc_buf_contents_t type = buf->b_hdr->b_type;
2066
2067 arc_adapt(size, state);
2068
2069 /*
2070 * We have not yet reached cache maximum size,
2071 * just allocate a new buffer.
2072 */
2073 if (!arc_evict_needed(type)) {
2074 if (type == ARC_BUFC_METADATA) {
2075 buf->b_data = zio_buf_alloc(size);
2076 arc_space_consume(size);
2077 } else {
2078 ASSERT(type == ARC_BUFC_DATA);
2079 buf->b_data = zio_data_buf_alloc(size);
2080 atomic_add_64(&arc_size, size);
2081 }
2082 goto out;
2083 }
2084
2085 /*
2086 * If we are prefetching from the mfu ghost list, this buffer
2087 * will end up on the mru list, so steal space from there.
2088 */
2089 if (state == arc_mfu_ghost)
2090 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2091 else if (state == arc_mru_ghost)
2092 state = arc_mru;
2093
2094 if (state == arc_mru || state == arc_anon) {
2095 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2096 state = (arc_mfu->arcs_lsize[type] > 0 &&
2097 arc_p > mru_used) ? arc_mfu : arc_mru;
2098 } else {
2099 /* MFU cases */
2100 uint64_t mfu_space = arc_c - arc_p;
2101 state = (arc_mru->arcs_lsize[type] > 0 &&
2102 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2103 }
2104 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
2105 if (type == ARC_BUFC_METADATA) {
2106 buf->b_data = zio_buf_alloc(size);
2107 arc_space_consume(size);
2108 } else {
2109 ASSERT(type == ARC_BUFC_DATA);
2110 buf->b_data = zio_data_buf_alloc(size);
2111 atomic_add_64(&arc_size, size);
2112 }
2113 ARCSTAT_BUMP(arcstat_recycle_miss);
2114 }
2115 ASSERT(buf->b_data != NULL);
2116 out:
2117 /*
2118 * Update the state size. Note that ghost states have a
2119 * "ghost size" and so don't need to be updated.
2120 */
2121 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2122 arc_buf_hdr_t *hdr = buf->b_hdr;
2123
2124 atomic_add_64(&hdr->b_state->arcs_size, size);
2125 if (list_link_active(&hdr->b_arc_node)) {
2126 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2127 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2128 }
2129 /*
2130 * If we are growing the cache, and we are adding anonymous
2131 * data, and we have outgrown arc_p, update arc_p
2132 */
2133 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2134 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2135 arc_p = MIN(arc_c, arc_p + size);
2136 }
2137 }
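/*
 * Worked example of the case analysis above (hypothetical sizes):
 * with arc_c = 1GB and arc_p = 384MB, the MFU is allotted
 * c - p = 640MB. An MRU insert while anon + mru hold 300MB (< p)
 * is case 1, so the MFU is victimized; at 400MB (> p) it is case 2
 * and the MRU evicts from itself. Likewise, an MFU insert while the
 * MFU holds 500MB (< 640MB) is case 3 (victimize the MRU), and at
 * 700MB it is case 4 (victimize the MFU).
 */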
2138
2139 /*
2140 * This routine is called whenever a buffer is accessed.
2141 * NOTE: the hash lock is dropped in this function.
2142 */
2143 static void
2144 arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2145 {
2146 ASSERT(MUTEX_HELD(hash_lock));
2147
2148 if (buf->b_state == arc_anon) {
2149 /*
2150 * This buffer is not in the cache, and does not
2151 * appear in our "ghost" list. Add the new buffer
2152 * to the MRU state.
2153 */
2154
2155 ASSERT(buf->b_arc_access == 0);
2156 buf->b_arc_access = lbolt;
2157 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2158 arc_change_state(arc_mru, buf, hash_lock);
2159
2160 } else if (buf->b_state == arc_mru) {
2161 /*
2162 * If this buffer is here because of a prefetch, then either:
2163 * - clear the flag if this is a "referencing" read
2164 * (any subsequent access will bump this into the MFU state).
2165 * or
2166 * - move the buffer to the head of the list if this is
2167 * another prefetch (to make it less likely to be evicted).
2168 */
2169 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2170 if (refcount_count(&buf->b_refcnt) == 0) {
2171 ASSERT(list_link_active(&buf->b_arc_node));
2172 } else {
2173 buf->b_flags &= ~ARC_PREFETCH;
2174 ARCSTAT_BUMP(arcstat_mru_hits);
2175 }
2176 buf->b_arc_access = lbolt;
2177 return;
2178 }
2179
2180 /*
2181 * This buffer has been "accessed" only once so far,
2182 * but it is still in the cache. Move it to the MFU
2183 * state.
2184 */
2185 if (lbolt > buf->b_arc_access + ARC_MINTIME) {
2186 /*
2187 * More than ARC_MINTIME has passed since we
2188 * instantiated this buffer. Move it to the
2189 * most frequently used state.
2190 */
2191 buf->b_arc_access = lbolt;
2192 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2193 arc_change_state(arc_mfu, buf, hash_lock);
2194 }
2195 ARCSTAT_BUMP(arcstat_mru_hits);
2196 } else if (buf->b_state == arc_mru_ghost) {
2197 arc_state_t *new_state;
2198 /*
2199 * This buffer has been "accessed" recently, but
2200 * was evicted from the cache. Move it to the
2201 * MFU state.
2202 */
2203
2204 if (buf->b_flags & ARC_PREFETCH) {
2205 new_state = arc_mru;
2206 if (refcount_count(&buf->b_refcnt) > 0)
2207 buf->b_flags &= ~ARC_PREFETCH;
2208 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2209 } else {
2210 new_state = arc_mfu;
2211 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2212 }
2213
2214 buf->b_arc_access = lbolt;
2215 arc_change_state(new_state, buf, hash_lock);
2216
2217 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2218 } else if (buf->b_state == arc_mfu) {
2219 /*
2220 * This buffer has been accessed more than once and is
2221 * still in the cache. Keep it in the MFU state.
2222 *
2223 * NOTE: an add_reference() that occurred when we did
2224 * the arc_read() will have kicked this off the list.
2225 * If it was a prefetch, we will explicitly move it to
2226 * the head of the list now.
2227 */
2228 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2229 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2230 ASSERT(list_link_active(&buf->b_arc_node));
2231 }
2232 ARCSTAT_BUMP(arcstat_mfu_hits);
2233 buf->b_arc_access = lbolt;
2234 } else if (buf->b_state == arc_mfu_ghost) {
2235 arc_state_t *new_state = arc_mfu;
2236 /*
2237 * This buffer has been accessed more than once but has
2238 * been evicted from the cache. Move it back to the
2239 * MFU state.
2240 */
2241
2242 if (buf->b_flags & ARC_PREFETCH) {
2243 /*
2244 * This is a prefetch access...
2245 * move this block back to the MRU state.
2246 */
2247 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
2248 new_state = arc_mru;
2249 }
2250
2251 buf->b_arc_access = lbolt;
2252 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2253 arc_change_state(new_state, buf, hash_lock);
2254
2255 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2256 } else if (buf->b_state == arc_l2c_only) {
2257 /*
2258 * This buffer is on the 2nd Level ARC.
2259 */
2260
2261 buf->b_arc_access = lbolt;
2262 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2263 arc_change_state(arc_mfu, buf, hash_lock);
2264 } else {
2265 ASSERT(!"invalid arc state");
2266 }
2267 }
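/*
 * Summary of the state transitions above (informational):
 *
 *	anon      -> mru	first insertion into the cache
 *	mru       -> mfu	re-access after more than ARC_MINTIME
 *	mru_ghost -> mfu	hit after eviction (mru if a prefetch)
 *	mfu       -> mfu	stays; access time is refreshed
 *	mfu_ghost -> mfu	hit after eviction (mru if a prefetch)
 *	l2c_only  -> mfu	hit on a buffer resident only in L2ARC
 */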
2268
2269 /* a generic arc_done_func_t which you can use */
2270 /* ARGSUSED */
2271 void
2272 arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2273 {
2274 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2275 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2276 }
2277
2278 /* a generic arc_done_func_t */
2279 void
2280 arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2281 {
2282 arc_buf_t **bufp = arg;
2283 if (zio && zio->io_error) {
2284 VERIFY(arc_buf_remove_ref(buf, arg) == 1);
2285 *bufp = NULL;
2286 } else {
2287 *bufp = buf;
2288 }
2289 }
2290
2291 static void
2292 arc_read_done(zio_t *zio)
2293 {
2294 arc_buf_hdr_t *hdr, *found;
2295 arc_buf_t *buf;
2296 arc_buf_t *abuf; /* buffer we're assigning to callback */
2297 kmutex_t *hash_lock;
2298 arc_callback_t *callback_list, *acb;
2299 int freeable = FALSE;
2300
2301 buf = zio->io_private;
2302 hdr = buf->b_hdr;
2303
2304 /*
2305 * The hdr was inserted into the hash table and removed from lists
2306 * prior to starting I/O. We should find this header, since
2307 * it's in the hash table, and it should be legit since it's
2308 * not possible to evict it during the I/O. The only possible
2309 * reason for it not to be found is if we were freed during the
2310 * read.
2311 */
2312 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
2313 &hash_lock);
2314
2315 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2316 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2317 (found == hdr && HDR_L2_READING(hdr)));
2318
2319 hdr->b_flags &= ~ARC_L2_EVICTED;
2320 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2321 hdr->b_flags &= ~ARC_L2CACHE;
2322
2323 /* byteswap if necessary */
2324 callback_list = hdr->b_acb;
2325 ASSERT(callback_list != NULL);
2326 if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
2327 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2328 byteswap_uint64_array :
2329 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
2330 func(buf->b_data, hdr->b_size);
2331 }
2332
2333 arc_cksum_compute(buf, B_FALSE);
2334
2335 /* create copies of the data buffer for the callers */
2336 abuf = buf;
2337 for (acb = callback_list; acb; acb = acb->acb_next) {
2338 if (acb->acb_done) {
2339 if (abuf == NULL)
2340 abuf = arc_buf_clone(buf);
2341 acb->acb_buf = abuf;
2342 abuf = NULL;
2343 }
2344 }
2345 hdr->b_acb = NULL;
2346 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2347 ASSERT(!HDR_BUF_AVAILABLE(hdr));
2348 if (abuf == buf)
2349 hdr->b_flags |= ARC_BUF_AVAILABLE;
2350
2351 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
2352
2353 if (zio->io_error != 0) {
2354 hdr->b_flags |= ARC_IO_ERROR;
2355 if (hdr->b_state != arc_anon)
2356 arc_change_state(arc_anon, hdr, hash_lock);
2357 if (HDR_IN_HASH_TABLE(hdr))
2358 buf_hash_remove(hdr);
2359 freeable = refcount_is_zero(&hdr->b_refcnt);
2360 }
2361
2362 /*
2363 * Broadcast before we drop the hash_lock to avoid the possibility
2364 * that the hdr (and hence the cv) might be freed before we get to
2365 * the cv_broadcast().
2366 */
2367 cv_broadcast(&hdr->b_cv);
2368
2369 if (hash_lock) {
2370 /*
2371 * Only call arc_access on anonymous buffers. This is because
2372 * if we've issued an I/O for an evicted buffer, we've already
2373 * called arc_access (to prevent any simultaneous readers from
2374 * getting confused).
2375 */
2376 if (zio->io_error == 0 && hdr->b_state == arc_anon)
2377 arc_access(hdr, hash_lock);
2378 mutex_exit(hash_lock);
2379 } else {
2380 /*
2381 * This block was freed while we waited for the read to
2382 * complete. It has been removed from the hash table and
2383 * moved to the anonymous state (so that it won't show up
2384 * in the cache).
2385 */
2386 ASSERT3P(hdr->b_state, ==, arc_anon);
2387 freeable = refcount_is_zero(&hdr->b_refcnt);
2388 }
2389
2390 /* execute each callback and free its structure */
2391 while ((acb = callback_list) != NULL) {
2392 if (acb->acb_done)
2393 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
2394
2395 if (acb->acb_zio_dummy != NULL) {
2396 acb->acb_zio_dummy->io_error = zio->io_error;
2397 zio_nowait(acb->acb_zio_dummy);
2398 }
2399
2400 callback_list = acb->acb_next;
2401 kmem_free(acb, sizeof (arc_callback_t));
2402 }
2403
2404 if (freeable)
2405 arc_hdr_destroy(hdr);
2406 }
2407
2408 /*
2409 * "Read" the block block at the specified DVA (in bp) via the
2410 * cache. If the block is found in the cache, invoke the provided
2411 * callback immediately and return. Note that the `zio' parameter
2412 * in the callback will be NULL in this case, since no IO was
2413 * required. If the block is not in the cache pass the read request
2414 * on to the spa with a substitute callback function, so that the
2415 * requested block will be added to the cache.
2416 *
2417 * If a read request arrives for a block that has a read in-progress,
2418 * either wait for the in-progress read to complete (and return the
2419 * results); or, if this is a read with a "done" func, add a record
2420 * to the read to invoke the "done" func when the read completes,
2421 * and return; or just return.
2422 *
2423 * arc_read_done() will invoke all the requested "done" functions
2424 * for readers of this block.
2425 *
2426 * Normal callers should use arc_read and pass the arc buffer and offset
2427 * for the bp. But if you know you don't need locking, you can use
2428 * arc_read_nolock().
2429 */
2430 int
2431 arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf,
2432 arc_done_func_t *done, void *private, int priority, int zio_flags,
2433 uint32_t *arc_flags, const zbookmark_t *zb)
2434 {
2435 int err;
2436 arc_buf_hdr_t *hdr = pbuf->b_hdr;
2437
2438 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
2439 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
2440 rw_enter(&pbuf->b_lock, RW_READER);
2441
2442 err = arc_read_nolock(pio, spa, bp, done, private, priority,
2443 zio_flags, arc_flags, zb);
2444
2445 ASSERT3P(hdr, ==, pbuf->b_hdr);
2446 rw_exit(&pbuf->b_lock);
2447 return (err);
2448 }
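/*
 * Illustrative caller sketch (hypothetical, for exposition only --
 * 'spa', 'bp', 'pbuf' and 'zb' are assumed to be in hand):
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	if (arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb) == 0 &&
 *	    abuf != NULL) {
 *		... consume abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */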
2449
2450 int
2451 arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
2452 arc_done_func_t *done, void *private, int priority, int zio_flags,
2453 uint32_t *arc_flags, const zbookmark_t *zb)
2454 {
2455 arc_buf_hdr_t *hdr;
2456 arc_buf_t *buf;
2457 kmutex_t *hash_lock;
2458 zio_t *rzio;
2459
2460 top:
2461 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
2462 if (hdr && hdr->b_datacnt > 0) {
2463
2464 *arc_flags |= ARC_CACHED;
2465
2466 if (HDR_IO_IN_PROGRESS(hdr)) {
2467
2468 if (*arc_flags & ARC_WAIT) {
2469 cv_wait(&hdr->b_cv, hash_lock);
2470 mutex_exit(hash_lock);
2471 goto top;
2472 }
2473 ASSERT(*arc_flags & ARC_NOWAIT);
2474
2475 if (done) {
2476 arc_callback_t *acb = NULL;
2477
2478 acb = kmem_zalloc(sizeof (arc_callback_t),
2479 KM_SLEEP);
2480 acb->acb_done = done;
2481 acb->acb_private = private;
2482 if (pio != NULL)
2483 acb->acb_zio_dummy = zio_null(pio,
2484 spa, NULL, NULL, zio_flags);
2485
2486 ASSERT(acb->acb_done != NULL);
2487 acb->acb_next = hdr->b_acb;
2488 hdr->b_acb = acb;
2489 add_reference(hdr, hash_lock, private);
2490 mutex_exit(hash_lock);
2491 return (0);
2492 }
2493 mutex_exit(hash_lock);
2494 return (0);
2495 }
2496
2497 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2498
2499 if (done) {
2500 add_reference(hdr, hash_lock, private);
2501 /*
2502 * If this block is already in use, create a new
2503 * copy of the data so that we will be guaranteed
2504 * that arc_release() will always succeed.
2505 */
2506 buf = hdr->b_buf;
2507 ASSERT(buf);
2508 ASSERT(buf->b_data);
2509 if (HDR_BUF_AVAILABLE(hdr)) {
2510 ASSERT(buf->b_efunc == NULL);
2511 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2512 } else {
2513 buf = arc_buf_clone(buf);
2514 }
2515 } else if (*arc_flags & ARC_PREFETCH &&
2516 refcount_count(&hdr->b_refcnt) == 0) {
2517 hdr->b_flags |= ARC_PREFETCH;
2518 }
2519 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2520 arc_access(hdr, hash_lock);
2521 if (*arc_flags & ARC_L2CACHE)
2522 hdr->b_flags |= ARC_L2CACHE;
2523 mutex_exit(hash_lock);
2524 ARCSTAT_BUMP(arcstat_hits);
2525 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2526 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2527 data, metadata, hits);
2528
2529 if (done)
2530 done(NULL, buf, private);
2531 } else {
2532 uint64_t size = BP_GET_LSIZE(bp);
2533 arc_callback_t *acb;
2534 vdev_t *vd = NULL;
2535 daddr_t addr;
2536
2537 if (hdr == NULL) {
2538 /* this block is not in the cache */
2539 arc_buf_hdr_t *exists;
2540 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
2541 buf = arc_buf_alloc(spa, size, private, type);
2542 hdr = buf->b_hdr;
2543 hdr->b_dva = *BP_IDENTITY(bp);
2544 hdr->b_birth = bp->blk_birth;
2545 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
2546 exists = buf_hash_insert(hdr, &hash_lock);
2547 if (exists) {
2548 /* somebody beat us to the hash insert */
2549 mutex_exit(hash_lock);
2550 bzero(&hdr->b_dva, sizeof (dva_t));
2551 hdr->b_birth = 0;
2552 hdr->b_cksum0 = 0;
2553 (void) arc_buf_remove_ref(buf, private);
2554 goto top; /* restart the IO request */
2555 }
2556 /* if this is a prefetch, we don't have a reference */
2557 if (*arc_flags & ARC_PREFETCH) {
2558 (void) remove_reference(hdr, hash_lock,
2559 private);
2560 hdr->b_flags |= ARC_PREFETCH;
2561 }
2562 if (*arc_flags & ARC_L2CACHE)
2563 hdr->b_flags |= ARC_L2CACHE;
2564 if (BP_GET_LEVEL(bp) > 0)
2565 hdr->b_flags |= ARC_INDIRECT;
2566 } else {
2567 /* this block is in the ghost cache */
2568 ASSERT(GHOST_STATE(hdr->b_state));
2569 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2570 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
2571 ASSERT(hdr->b_buf == NULL);
2572
2573 /* if this is a prefetch, we don't have a reference */
2574 if (*arc_flags & ARC_PREFETCH)
2575 hdr->b_flags |= ARC_PREFETCH;
2576 else
2577 add_reference(hdr, hash_lock, private);
2578 if (*arc_flags & ARC_L2CACHE)
2579 hdr->b_flags |= ARC_L2CACHE;
2580 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2581 buf->b_hdr = hdr;
2582 buf->b_data = NULL;
2583 buf->b_efunc = NULL;
2584 buf->b_private = NULL;
2585 buf->b_next = NULL;
2586 hdr->b_buf = buf;
2587 arc_get_data_buf(buf);
2588 ASSERT(hdr->b_datacnt == 0);
2589 hdr->b_datacnt = 1;
2590
2591 }
2592
2593 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
2594 acb->acb_done = done;
2595 acb->acb_private = private;
2596
2597 ASSERT(hdr->b_acb == NULL);
2598 hdr->b_acb = acb;
2599 hdr->b_flags |= ARC_IO_IN_PROGRESS;
2600
2601 /*
2602 * If the buffer has been evicted, migrate it to a present state
2603 * before issuing the I/O. Once we drop the hash-table lock,
2604 * the header will be marked as I/O in progress and have an
2605 * attached buffer. At this point, anybody who finds this
2606 * buffer ought to notice that it's legit but has a pending I/O.
2607 */
2608
2609 if (GHOST_STATE(hdr->b_state))
2610 arc_access(hdr, hash_lock);
2611
2612 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
2613 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
2614 addr = hdr->b_l2hdr->b_daddr;
2615 /*
2616 * Lock out device removal.
2617 */
2618 if (vdev_is_dead(vd) ||
2619 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
2620 vd = NULL;
2621 }
2622
2623 mutex_exit(hash_lock);
2624
2625 ASSERT3U(hdr->b_size, ==, size);
2626 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
2627 zbookmark_t *, zb);
2628 ARCSTAT_BUMP(arcstat_misses);
2629 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
2630 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
2631 data, metadata, misses);
2632
2633 if (vd != NULL) {
2634 /*
2635 * Read from the L2ARC if the following are true:
2636 * 1. The L2ARC vdev was previously cached.
2637 * 2. This buffer still has L2ARC metadata.
2638 * 3. This buffer isn't currently writing to the L2ARC.
2639 * 4. The L2ARC entry wasn't evicted, which may
2640 * also have invalidated the vdev.
2641 */
2642 if (hdr->b_l2hdr != NULL &&
2643 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) {
2644 l2arc_read_callback_t *cb;
2645
2646 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
2647 ARCSTAT_BUMP(arcstat_l2_hits);
2648
2649 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
2650 KM_SLEEP);
2651 cb->l2rcb_buf = buf;
2652 cb->l2rcb_spa = spa;
2653 cb->l2rcb_bp = *bp;
2654 cb->l2rcb_zb = *zb;
2655 cb->l2rcb_flags = zio_flags;
2656
2657 /*
2658 * l2arc read. The SCL_L2ARC lock will be
2659 * released by l2arc_read_done().
2660 */
2661 rzio = zio_read_phys(pio, vd, addr, size,
2662 buf->b_data, ZIO_CHECKSUM_OFF,
2663 l2arc_read_done, cb, priority, zio_flags |
2664 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
2665 ZIO_FLAG_DONT_PROPAGATE |
2666 ZIO_FLAG_DONT_RETRY, B_FALSE);
2667 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
2668 zio_t *, rzio);
2669
2670 if (*arc_flags & ARC_NOWAIT) {
2671 zio_nowait(rzio);
2672 return (0);
2673 }
2674
2675 ASSERT(*arc_flags & ARC_WAIT);
2676 if (zio_wait(rzio) == 0)
2677 return (0);
2678
2679 /* l2arc read error; goto zio_read() */
2680 } else {
2681 DTRACE_PROBE1(l2arc__miss,
2682 arc_buf_hdr_t *, hdr);
2683 ARCSTAT_BUMP(arcstat_l2_misses);
2684 if (HDR_L2_WRITING(hdr))
2685 ARCSTAT_BUMP(arcstat_l2_rw_clash);
2686 spa_config_exit(spa, SCL_L2ARC, vd);
2687 }
2688 }
2689
2690 rzio = zio_read(pio, spa, bp, buf->b_data, size,
2691 arc_read_done, buf, priority, zio_flags, zb);
2692
2693 if (*arc_flags & ARC_WAIT)
2694 return (zio_wait(rzio));
2695
2696 ASSERT(*arc_flags & ARC_NOWAIT);
2697 zio_nowait(rzio);
2698 }
2699 return (0);
2700 }
2701
2702 /*
2703 * arc_read() variant to support pool traversal. If the block is already
2704 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
2705 * The idea is that we don't want pool traversal filling up memory, but
2706 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
2707 */
2708 int
2709 arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
2710 {
2711 arc_buf_hdr_t *hdr;
2712 kmutex_t *hash_mtx;
2713 int rc = 0;
2714
2715 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);
2716
2717 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
2718 arc_buf_t *buf = hdr->b_buf;
2719
2720 ASSERT(buf);
2721 while (buf->b_data == NULL) {
2722 buf = buf->b_next;
2723 ASSERT(buf);
2724 }
2725 bcopy(buf->b_data, data, hdr->b_size);
2726 } else {
2727 rc = ENOENT;
2728 }
2729
2730 if (hash_mtx)
2731 mutex_exit(hash_mtx);
2732
2733 return (rc);
2734 }
2735
2736 void
2737 arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
2738 {
2739 ASSERT(buf->b_hdr != NULL);
2740 ASSERT(buf->b_hdr->b_state != arc_anon);
2741 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
2742 buf->b_efunc = func;
2743 buf->b_private = private;
2744 }
2745
2746 /*
2747 * This is used by the DMU to let the ARC know that a buffer is
2748 * being evicted, so the ARC should clean up. If this arc buf
2749 * is not yet in the evicted state, it will be put there.
2750 */
2751 int
2752 arc_buf_evict(arc_buf_t *buf)
2753 {
2754 arc_buf_hdr_t *hdr;
2755 kmutex_t *hash_lock;
2756 arc_buf_t **bufp;
2757
2758 rw_enter(&buf->b_lock, RW_WRITER);
2759 hdr = buf->b_hdr;
2760 if (hdr == NULL) {
2761 /*
2762 * We are in arc_do_user_evicts().
2763 */
2764 ASSERT(buf->b_data == NULL);
2765 rw_exit(&buf->b_lock);
2766 return (0);
2767 } else if (buf->b_data == NULL) {
2768 arc_buf_t copy = *buf; /* structure assignment */
2769 /*
2770 * We are on the eviction list; process this buffer now
2771 * but let arc_do_user_evicts() do the reaping.
2772 */
2773 buf->b_efunc = NULL;
2774 rw_exit(&buf->b_lock);
2775 VERIFY(copy.b_efunc(&copy) == 0);
2776 return (1);
2777 }
2778 hash_lock = HDR_LOCK(hdr);
2779 mutex_enter(hash_lock);
2780
2781 ASSERT(buf->b_hdr == hdr);
2782 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2783 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2784
2785 /*
2786 * Pull this buffer off of the hdr
2787 */
2788 bufp = &hdr->b_buf;
2789 while (*bufp != buf)
2790 bufp = &(*bufp)->b_next;
2791 *bufp = buf->b_next;
2792
2793 ASSERT(buf->b_data != NULL);
2794 arc_buf_destroy(buf, FALSE, FALSE);
2795
2796 if (hdr->b_datacnt == 0) {
2797 arc_state_t *old_state = hdr->b_state;
2798 arc_state_t *evicted_state;
2799
2800 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2801
2802 evicted_state =
2803 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2804
2805 mutex_enter(&old_state->arcs_mtx);
2806 mutex_enter(&evicted_state->arcs_mtx);
2807
2808 arc_change_state(evicted_state, hdr, hash_lock);
2809 ASSERT(HDR_IN_HASH_TABLE(hdr));
2810 hdr->b_flags |= ARC_IN_HASH_TABLE;
2811 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
2812
2813 mutex_exit(&evicted_state->arcs_mtx);
2814 mutex_exit(&old_state->arcs_mtx);
2815 }
2816 mutex_exit(hash_lock);
2817 rw_exit(&buf->b_lock);
2818
2819 VERIFY(buf->b_efunc(buf) == 0);
2820 buf->b_efunc = NULL;
2821 buf->b_private = NULL;
2822 buf->b_hdr = NULL;
2823 kmem_cache_free(buf_cache, buf);
2824 return (1);
2825 }
2826
2827 /*
2828 * Release this buffer from the cache. This must be done
2829 * after a read and prior to modifying the buffer contents.
2830 * If the buffer has more than one reference, we must make
2831 * a new hdr for the buffer.
2832 */
2833 void
2834 arc_release(arc_buf_t *buf, void *tag)
2835 {
2836 arc_buf_hdr_t *hdr;
2837 kmutex_t *hash_lock;
2838 l2arc_buf_hdr_t *l2hdr;
2839 uint64_t buf_size;
2840
2841 rw_enter(&buf->b_lock, RW_WRITER);
2842 hdr = buf->b_hdr;
2843
2844 /* this buffer is not on any list */
2845 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
2846 ASSERT(!(hdr->b_flags & ARC_STORED));
2847
2848 if (hdr->b_state == arc_anon) {
2849 /* this buffer is already released */
2850 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
2851 ASSERT(BUF_EMPTY(hdr));
2852 ASSERT(buf->b_efunc == NULL);
2853 arc_buf_thaw(buf);
2854 rw_exit(&buf->b_lock);
2855 return;
2856 }
2857
2858 hash_lock = HDR_LOCK(hdr);
2859 mutex_enter(hash_lock);
2860
2861 l2hdr = hdr->b_l2hdr;
2862 if (l2hdr) {
2863 mutex_enter(&l2arc_buflist_mtx);
2864 hdr->b_l2hdr = NULL;
2865 buf_size = hdr->b_size;
2866 }
2867
2868 /*
2869 * Do we have more than one buf?
2870 */
2871 if (hdr->b_datacnt > 1) {
2872 arc_buf_hdr_t *nhdr;
2873 arc_buf_t **bufp;
2874 uint64_t blksz = hdr->b_size;
2875 spa_t *spa = hdr->b_spa;
2876 arc_buf_contents_t type = hdr->b_type;
2877 uint32_t flags = hdr->b_flags;
2878
2879 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
2880 /*
2881 * Pull the data off of this buf and attach it to
2882 * a new anonymous buf.
2883 */
2884 (void) remove_reference(hdr, hash_lock, tag);
2885 bufp = &hdr->b_buf;
2886 while (*bufp != buf)
2887 bufp = &(*bufp)->b_next;
2888 *bufp = (*bufp)->b_next;
2889 buf->b_next = NULL;
2890
2891 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
2892 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
2893 if (refcount_is_zero(&hdr->b_refcnt)) {
2894 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
2895 ASSERT3U(*size, >=, hdr->b_size);
2896 atomic_add_64(size, -hdr->b_size);
2897 }
2898 hdr->b_datacnt -= 1;
2899 arc_cksum_verify(buf);
2900
2901 mutex_exit(hash_lock);
2902
2903 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
2904 nhdr->b_size = blksz;
2905 nhdr->b_spa = spa;
2906 nhdr->b_type = type;
2907 nhdr->b_buf = buf;
2908 nhdr->b_state = arc_anon;
2909 nhdr->b_arc_access = 0;
2910 nhdr->b_flags = flags & ARC_L2_WRITING;
2911 nhdr->b_l2hdr = NULL;
2912 nhdr->b_datacnt = 1;
2913 nhdr->b_freeze_cksum = NULL;
2914 (void) refcount_add(&nhdr->b_refcnt, tag);
2915 buf->b_hdr = nhdr;
2916 rw_exit(&buf->b_lock);
2917 atomic_add_64(&arc_anon->arcs_size, blksz);
2918 } else {
2919 rw_exit(&buf->b_lock);
2920 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
2921 ASSERT(!list_link_active(&hdr->b_arc_node));
2922 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2923 arc_change_state(arc_anon, hdr, hash_lock);
2924 hdr->b_arc_access = 0;
2925 mutex_exit(hash_lock);
2926
2927 bzero(&hdr->b_dva, sizeof (dva_t));
2928 hdr->b_birth = 0;
2929 hdr->b_cksum0 = 0;
2930 arc_buf_thaw(buf);
2931 }
2932 buf->b_efunc = NULL;
2933 buf->b_private = NULL;
2934
2935 if (l2hdr) {
2936 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
2937 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
2938 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
2939 mutex_exit(&l2arc_buflist_mtx);
2940 }
2941 }
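/*
 * Note (informational): if other readers still share the hdr
 * (b_datacnt > 1), the buffer is re-parented onto a fresh anonymous
 * hdr above and the old hdr keeps the remaining buffers; if this was
 * the only buffer, the existing hdr itself is moved to the anonymous
 * state and its identity (dva/birth/cksum0) is cleared.
 */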
2942
2943 int
2944 arc_released(arc_buf_t *buf)
2945 {
2946 int released;
2947
2948 rw_enter(&buf->b_lock, RW_READER);
2949 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
2950 rw_exit(&buf->b_lock);
2951 return (released);
2952 }
2953
2954 int
2955 arc_has_callback(arc_buf_t *buf)
2956 {
2957 int callback;
2958
2959 rw_enter(&buf->b_lock, RW_READER);
2960 callback = (buf->b_efunc != NULL);
2961 rw_exit(&buf->b_lock);
2962 return (callback);
2963 }
2964
2965 #ifdef ZFS_DEBUG
2966 int
2967 arc_referenced(arc_buf_t *buf)
2968 {
2969 int referenced;
2970
2971 rw_enter(&buf->b_lock, RW_READER);
2972 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
2973 rw_exit(&buf->b_lock);
2974 return (referenced);
2975 }
2976 #endif
2977
2978 static void
2979 arc_write_ready(zio_t *zio)
2980 {
2981 arc_write_callback_t *callback = zio->io_private;
2982 arc_buf_t *buf = callback->awcb_buf;
2983 arc_buf_hdr_t *hdr = buf->b_hdr;
2984
2985 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2986 callback->awcb_ready(zio, buf, callback->awcb_private);
2987
2988 /*
2989 * If the IO is already in progress, then this is a re-write
2990 * attempt, so we need to thaw and re-compute the cksum.
2991 * It is the responsibility of the callback to handle the
2992 * accounting for any re-write attempt.
2993 */
2994 if (HDR_IO_IN_PROGRESS(hdr)) {
2995 mutex_enter(&hdr->b_freeze_lock);
2996 if (hdr->b_freeze_cksum != NULL) {
2997 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
2998 hdr->b_freeze_cksum = NULL;
2999 }
3000 mutex_exit(&hdr->b_freeze_lock);
3001 }
3002 arc_cksum_compute(buf, B_FALSE);
3003 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3004 }
3005
3006 static void
3007 arc_write_done(zio_t *zio)
3008 {
3009 arc_write_callback_t *callback = zio->io_private;
3010 arc_buf_t *buf = callback->awcb_buf;
3011 arc_buf_hdr_t *hdr = buf->b_hdr;
3012
3013 hdr->b_acb = NULL;
3014
3015 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3016 hdr->b_birth = zio->io_bp->blk_birth;
3017 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3018 /*
3019 * If the block to be written was all-zero, we may have
3020 * compressed it away. In this case no write was performed
3021 * so there will be no dva/birth-date/checksum. The buffer
3022 * must therefore remain anonymous (and uncached).
3023 */
3024 if (!BUF_EMPTY(hdr)) {
3025 arc_buf_hdr_t *exists;
3026 kmutex_t *hash_lock;
3027
3028 arc_cksum_verify(buf);
3029
3030 exists = buf_hash_insert(hdr, &hash_lock);
3031 if (exists) {
3032 /*
3033 * This can only happen if we overwrite for
3034 * sync-to-convergence, because we remove
3035 * buffers from the hash table when we arc_free().
3036 */
3037 ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE);
3038 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
3039 BP_IDENTITY(zio->io_bp)));
3040 ASSERT3U(zio->io_bp_orig.blk_birth, ==,
3041 zio->io_bp->blk_birth);
3042
3043 ASSERT(refcount_is_zero(&exists->b_refcnt));
3044 arc_change_state(arc_anon, exists, hash_lock);
3045 mutex_exit(hash_lock);
3046 arc_hdr_destroy(exists);
3047 exists = buf_hash_insert(hdr, &hash_lock);
3048 ASSERT3P(exists, ==, NULL);
3049 }
3050 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3051 /* if it's not anon, we are doing a scrub */
3052 if (hdr->b_state == arc_anon)
3053 arc_access(hdr, hash_lock);
3054 mutex_exit(hash_lock);
3055 } else if (callback->awcb_done == NULL) {
3056 int destroy_hdr;
3057 /*
3058 * This is an anonymous buffer with no user callback,
3059 * destroy it if there are no active references.
3060 */
3061 mutex_enter(&arc_eviction_mtx);
3062 destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
3063 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3064 mutex_exit(&arc_eviction_mtx);
3065 if (destroy_hdr)
3066 arc_hdr_destroy(hdr);
3067 } else {
3068 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3069 }
3070 hdr->b_flags &= ~ARC_STORED;
3071
3072 if (callback->awcb_done) {
3073 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3074 callback->awcb_done(zio, buf, callback->awcb_private);
3075 }
3076
3077 kmem_free(callback, sizeof (arc_write_callback_t));
3078 }
3079
3080 void
3081 write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp)
3082 {
3083 boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata);
3084
3085 /* Determine checksum setting */
3086 if (ismd) {
3087 /*
3088 * Metadata always gets checksummed. If the data
3089 * checksum is multi-bit correctable, and it's not a
3090 * ZBT-style checksum, then it's suitable for metadata
3091 * as well. Otherwise, the metadata checksum defaults
3092 * to fletcher4.
3093 */
3094 if (zio_checksum_table[wp->wp_oschecksum].ci_correctable &&
3095 !zio_checksum_table[wp->wp_oschecksum].ci_zbt)
3096 zp->zp_checksum = wp->wp_oschecksum;
3097 else
3098 zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
3099 } else {
3100 zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum,
3101 wp->wp_oschecksum);
3102 }
3103
3104 /* Determine compression setting */
3105 if (ismd) {
3106 /*
3107 * XXX -- we should design a compression algorithm
3108 * that specializes in arrays of bps.
3109 */
3110 zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
3111 ZIO_COMPRESS_LZJB;
3112 } else {
3113 zp->zp_compress = zio_compress_select(wp->wp_dncompress,
3114 wp->wp_oscompress);
3115 }
3116
3117 zp->zp_type = wp->wp_type;
3118 zp->zp_level = wp->wp_level;
3119 zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa));
3120 }
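/*
 * Worked example (hypothetical inputs): for a level-0 metadata block
 * with wp_copies = 2, ismd evaluates to 1, so zp_ndvas becomes
 * MIN(2 + 1, spa_max_replication(spa)) -- normally three DVAs, i.e.
 * one extra copy because the block is metadata.
 */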
3121
3122 zio_t *
3123 arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp,
3124 boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
3125 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
3126 int zio_flags, const zbookmark_t *zb)
3127 {
3128 arc_buf_hdr_t *hdr = buf->b_hdr;
3129 arc_write_callback_t *callback;
3130 zio_t *zio;
3131 zio_prop_t zp;
3132
3133 ASSERT(ready != NULL);
3134 ASSERT(!HDR_IO_ERROR(hdr));
3135 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3136 ASSERT(hdr->b_acb == 0);
3137 if (l2arc)
3138 hdr->b_flags |= ARC_L2CACHE;
3139 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3140 callback->awcb_ready = ready;
3141 callback->awcb_done = done;
3142 callback->awcb_private = private;
3143 callback->awcb_buf = buf;
3144
3145 write_policy(spa, wp, &zp);
3146 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp,
3147 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3148
3149 return (zio);
3150 }
3151
3152 int
3153 arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
3154 zio_done_func_t *done, void *private, uint32_t arc_flags)
3155 {
3156 arc_buf_hdr_t *ab;
3157 kmutex_t *hash_lock;
3158 zio_t *zio;
3159
3160 /*
3161 * If this buffer is in the cache, release it, so it
3162 * can be re-used.
3163 */
3164 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
3165 if (ab != NULL) {
3166 /*
3167 * The checksum of blocks to free is not always
3168 * preserved (e.g. on the deadlist). However, if it is
3169 * nonzero, it should match what we have in the cache.
3170 */
3171 ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
3172 bp->blk_cksum.zc_word[0] == ab->b_cksum0 ||
3173 bp->blk_fill == BLK_FILL_ALREADY_FREED);
3174
3175 if (ab->b_state != arc_anon)
3176 arc_change_state(arc_anon, ab, hash_lock);
3177 if (HDR_IO_IN_PROGRESS(ab)) {
3178 /*
3179 * This should only happen when we prefetch.
3180 */
3181 ASSERT(ab->b_flags & ARC_PREFETCH);
3182 ASSERT3U(ab->b_datacnt, ==, 1);
3183 ab->b_flags |= ARC_FREED_IN_READ;
3184 if (HDR_IN_HASH_TABLE(ab))
3185 buf_hash_remove(ab);
3186 ab->b_arc_access = 0;
3187 bzero(&ab->b_dva, sizeof (dva_t));
3188 ab->b_birth = 0;
3189 ab->b_cksum0 = 0;
3190 ab->b_buf->b_efunc = NULL;
3191 ab->b_buf->b_private = NULL;
3192 mutex_exit(hash_lock);
3193 } else if (refcount_is_zero(&ab->b_refcnt)) {
3194 ab->b_flags |= ARC_FREE_IN_PROGRESS;
3195 mutex_exit(hash_lock);
3196 arc_hdr_destroy(ab);
3197 ARCSTAT_BUMP(arcstat_deleted);
3198 } else {
3199 /*
3200 * We still have an active reference on this
3201 * buffer. This can happen, e.g., from
3202 * dbuf_unoverride().
3203 */
3204 ASSERT(!HDR_IN_HASH_TABLE(ab));
3205 ab->b_arc_access = 0;
3206 bzero(&ab->b_dva, sizeof (dva_t));
3207 ab->b_birth = 0;
3208 ab->b_cksum0 = 0;
3209 ab->b_buf->b_efunc = NULL;
3210 ab->b_buf->b_private = NULL;
3211 mutex_exit(hash_lock);
3212 }
3213 }
3214
3215 zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED);
3216
3217 if (arc_flags & ARC_WAIT)
3218 return (zio_wait(zio));
3219
3220 ASSERT(arc_flags & ARC_NOWAIT);
3221 zio_nowait(zio);
3222
3223 return (0);
3224 }
3225
3226 static int
3227 arc_memory_throttle(uint64_t reserve, uint64_t txg)
3228 {
3229 #ifdef _KERNEL
3230 uint64_t inflight_data = arc_anon->arcs_size;
3231 uint64_t available_memory = ptob(freemem);
3232 static uint64_t page_load = 0;
3233 static uint64_t last_txg = 0;
3234
3235 #if defined(__i386)
3236 available_memory =
3237 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3238 #endif
3239 if (available_memory >= zfs_write_limit_max)
3240 return (0);
3241
3242 if (txg > last_txg) {
3243 last_txg = txg;
3244 page_load = 0;
3245 }
3246 /*
3247 * If we are in pageout, we know that memory is already tight and
3248 * the arc is already going to be evicting, so we just want to
3249 * continue to let page writes occur as quickly as possible.
3250 */
3251 if (curproc == proc_pageout) {
3252 if (page_load > MAX(ptob(minfree), available_memory) / 4)
3253 return (ERESTART);
3254 /* Note: reserve is inflated, so we deflate */
3255 page_load += reserve / 8;
3256 return (0);
3257 } else if (page_load > 0 && arc_reclaim_needed()) {
3258 /* memory is low, delay before restarting */
3259 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3260 return (EAGAIN);
3261 }
3262 page_load = 0;
3263
3264 if (arc_size > arc_c_min) {
3265 uint64_t evictable_memory =
3266 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3267 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3268 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3269 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3270 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3271 }
3272
3273 if (inflight_data > available_memory / 4) {
3274 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3275 return (ERESTART);
3276 }
3277 #endif
3278 return (0);
3279 }
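/*
 * Illustrative throttle arithmetic (hypothetical numbers): in the
 * pageout thread, a stream of 1MB reserves grows page_load by
 * 1MB / 8 = 128K per call, so if MAX(ptob(minfree), available_memory)
 * / 4 were 16MB, ERESTART would be returned after roughly 128 such
 * reserves within a single txg.
 */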
3280
3281 void
3282 arc_tempreserve_clear(uint64_t reserve)
3283 {
3284 atomic_add_64(&arc_tempreserve, -reserve);
3285 ASSERT((int64_t)arc_tempreserve >= 0);
3286 }
3287
3288 int
3289 arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3290 {
3291 int error;
3292
3293 #ifdef ZFS_DEBUG
3294 /*
3295 * Once in a while, fail for no reason. Everything should cope.
3296 */
3297 if (spa_get_random(10000) == 0) {
3298 dprintf("forcing random failure\n");
3299 return (ERESTART);
3300 }
3301 #endif
3302 if (reserve > arc_c/4 && !arc_no_grow)
3303 arc_c = MIN(arc_c_max, reserve * 4);
3304 if (reserve > arc_c)
3305 return (ENOMEM);
3306
3307 /*
3308 * Writes will, almost always, require additional memory allocations
3309 * in order to compress/encrypt/etc. the data. We therefore need to
3310 * make sure that there is sufficient available memory for this.
3311 */
3312 if (error = arc_memory_throttle(reserve, txg))
3313 return (error);
3314
3315 /*
3316 * Throttle writes when the amount of dirty data in the cache
3317 * gets too large. We try to keep the cache less than half full
3318 * of dirty blocks so that our sync times don't grow too large.
3319 * Note: if two requests come in concurrently, we might let them
3320 * both succeed, when one of them should fail. Not a huge deal.
3321 */
3322 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
3323 arc_anon->arcs_size > arc_c / 4) {
3324 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3325 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3326 arc_tempreserve>>10,
3327 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3328 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3329 reserve>>10, arc_c>>10);
3330 return (ERESTART);
3331 }
3332 atomic_add_64(&arc_tempreserve, reserve);
3333 return (0);
3334 }
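/*
 * Worked example (hypothetical sizes): with arc_c = 1GB, a reserve is
 * refused with ERESTART once the new reserve plus outstanding
 * reserves plus anonymous (dirty) data would exceed 512MB while
 * anonymous data alone exceeds 256MB; the caller is expected to retry
 * after some dirty data has been synced.
 */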
3335
3336 void
3337 arc_init(void)
3338 {
3339 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3340 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3341
3342 /* Convert seconds to clock ticks */
3343 arc_min_prefetch_lifespan = 1 * hz;
3344
3345 /* Start out with 1/8 of all memory */
3346 arc_c = physmem * PAGESIZE / 8;
3347
3348 #ifdef _KERNEL
3349 /*
3350 * On architectures where the physical memory can be larger
3351 * than the addressable space (Intel in 32-bit mode), we may
3352 * need to limit the cache to 1/8 of VM size.
3353 */
3354 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3355 #endif
3356
3357 /* set min cache to 1/32 of all memory, or 64MB, whichever is more */
3358 arc_c_min = MAX(arc_c / 4, 64<<20);
3359 /* set max to 3/4 of all memory, or all but 1GB, whichever is more */
3360 if (arc_c * 8 >= 1<<30)
3361 arc_c_max = (arc_c * 8) - (1<<30);
3362 else
3363 arc_c_max = arc_c_min;
3364 arc_c_max = MAX(arc_c * 6, arc_c_max);
3365
3366 /*
3367 * Allow the tunables to override our calculations if they are
3368 * reasonable (i.e. over 64MB)
3369 */
3370 if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE)
3371 arc_c_max = zfs_arc_max;
3372 if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max)
3373 arc_c_min = zfs_arc_min;
3374
3375 arc_c = arc_c_max;
3376 arc_p = (arc_c >> 1);
3377
3378 /* limit meta-data to 1/4 of the arc capacity */
3379 arc_meta_limit = arc_c_max / 4;
3380
3381 /* Allow the tunable to override if it is reasonable */
3382 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
3383 arc_meta_limit = zfs_arc_meta_limit;
3384
3385 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
3386 arc_c_min = arc_meta_limit / 2;
3387
3388 /* if kmem_flags are set, let's try to use less memory */
3389 if (kmem_debugging())
3390 arc_c = arc_c / 2;
3391 if (arc_c < arc_c_min)
3392 arc_c = arc_c_min;
3393
3394 arc_anon = &ARC_anon;
3395 arc_mru = &ARC_mru;
3396 arc_mru_ghost = &ARC_mru_ghost;
3397 arc_mfu = &ARC_mfu;
3398 arc_mfu_ghost = &ARC_mfu_ghost;
3399 arc_l2c_only = &ARC_l2c_only;
3400 arc_size = 0;
3401
3402 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3403 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3404 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3405 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3406 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3407 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
3408
3409 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
3410 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3411 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
3412 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3413 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
3414 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3415 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
3416 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3417 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
3418 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3419 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
3420 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3421 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
3422 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3423 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
3424 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3425 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
3426 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3427 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
3428 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
3429
3430 buf_init();
3431
3432 arc_thread_exit = 0;
3433 arc_eviction_list = NULL;
3434 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
3435 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
3436
3437 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
3438 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
3439
3440 if (arc_ksp != NULL) {
3441 arc_ksp->ks_data = &arc_stats;
3442 kstat_install(arc_ksp);
3443 }
3444
3445 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
3446 TS_RUN, minclsyspri);
3447
3448 arc_dead = FALSE;
3449 arc_warm = B_FALSE;
3450
3451 if (zfs_write_limit_max == 0)
3452 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
3453 else
3454 zfs_write_limit_shift = 0;
3455 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
3456 }
3457
3458 void
3459 arc_fini(void)
3460 {
3461 mutex_enter(&arc_reclaim_thr_lock);
3462 arc_thread_exit = 1;
3463 while (arc_thread_exit != 0)
3464 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
3465 mutex_exit(&arc_reclaim_thr_lock);
3466
3467 arc_flush(NULL);
3468
3469 arc_dead = TRUE;
3470
3471 if (arc_ksp != NULL) {
3472 kstat_delete(arc_ksp);
3473 arc_ksp = NULL;
3474 }
3475
3476 mutex_destroy(&arc_eviction_mtx);
3477 mutex_destroy(&arc_reclaim_thr_lock);
3478 cv_destroy(&arc_reclaim_thr_cv);
3479
3480 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
3481 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
3482 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
3483 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
3484 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
3485 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
3486 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
3487 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
3488
3489 mutex_destroy(&arc_anon->arcs_mtx);
3490 mutex_destroy(&arc_mru->arcs_mtx);
3491 mutex_destroy(&arc_mru_ghost->arcs_mtx);
3492 mutex_destroy(&arc_mfu->arcs_mtx);
3493 mutex_destroy(&arc_mfu_ghost->arcs_mtx);
3494 mutex_destroy(&arc_l2c_only->arcs_mtx);
3495
3496 mutex_destroy(&zfs_write_limit_lock);
3497
3498 buf_fini();
3499 }
3500
3501 /*
3502 * Level 2 ARC
3503 *
3504 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
3505 * It uses dedicated storage devices to hold cached data, which are populated
3506 * using large infrequent writes. The main role of this cache is to boost
3507 * the performance of random read workloads. The intended L2ARC devices
3508 * include short-stroked disks, solid state disks, and other media with
3509 * substantially faster read latency than disk.
3510 *
3511 * +-----------------------+
3512 * | ARC |
3513 * +-----------------------+
3514 * | ^ ^
3515 * | | |
3516 * l2arc_feed_thread() arc_read()
3517 * | | |
3518 * | l2arc read |
3519 * V | |
3520 * +---------------+ |
3521 * | L2ARC | |
3522 * +---------------+ |
3523 * | ^ |
3524 * l2arc_write() | |
3525 * | | |
3526 * V | |
3527 * +-------+ +-------+
3528 * | vdev | | vdev |
3529 * | cache | | cache |
3530 * +-------+ +-------+
3531 * +=========+ .-----.
3532 * : L2ARC : |-_____-|
3533 * : devices : | Disks |
3534 * +=========+ `-_____-'
3535 *
3536 * Read requests are satisfied from the following sources, in order:
3537 *
3538 * 1) ARC
3539 * 2) vdev cache of L2ARC devices
3540 * 3) L2ARC devices
3541 * 4) vdev cache of disks
3542 * 5) disks
3543 *
3544 * Some L2ARC device types exhibit extremely slow write performance.
3545 * To accommodate this, there are some significant differences between
3546 * the L2ARC and traditional cache design:
3547 *
3548 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
3549 * the ARC behave as usual, freeing buffers and placing headers on ghost
3550 * lists. The ARC does not send buffers to the L2ARC during eviction as
3551 * this would add inflated write latencies for all ARC memory pressure.
3552 *
3553 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3554 * It does this by periodically scanning buffers from the eviction-end of
3555 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3556 * not already there. It scans until a headroom of buffers is satisfied,
3557 * which itself is a buffer for ARC eviction. The thread that does this is
3558 * l2arc_feed_thread(), illustrated below; example sizes are included to
3559 * provide a better sense of ratio than this diagram:
3560 *
3561 * head --> tail
3562 * +---------------------+----------+
3563 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3564 * +---------------------+----------+ | o L2ARC eligible
3565 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3566 * +---------------------+----------+ |
3567 * 15.9 Gbytes ^ 32 Mbytes |
3568 * headroom |
3569 * l2arc_feed_thread()
3570 * |
3571 * l2arc write hand <--[oooo]--'
3572 * | 8 Mbyte
3573 * | write max
3574 * V
3575 * +==============================+
3576 * L2ARC dev |####|#|###|###| |####| ... |
3577 * +==============================+
3578 * 32 Gbytes
3579 *
3580 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3581 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3582 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3583 * safe to say that this is an uncommon case, since buffers at the end of
3584 * the ARC lists have moved there due to inactivity.
3585 *
3586 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3587 * then the L2ARC simply misses copying some buffers. This serves as a
3588 * pressure valve to prevent heavy read workloads from both stalling the ARC
3589 * with waits and clogging the L2ARC with writes. This also helps prevent
3590 * the potential for the L2ARC to churn if it attempts to cache content too
3591 * quickly, such as during backups of the entire pool.
3592 *
3593 * 5. After system boot and before the ARC has filled main memory, there are
3594 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3595 * lists can remain mostly static. Instead of searching from the tail of
3596 * these lists as pictured, l2arc_feed_thread() will search from the list heads
3597 * for eligible buffers, greatly increasing its chance of finding them.
3598 *
3599 * The L2ARC device write speed is also boosted during this time so that
3600 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3601 * there are no L2ARC reads, and no fear of degrading read performance
3602 * through increased writes.
3603 *
3604 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3605 * the vdev queue can aggregate them into larger and fewer writes. Each
3606 * device is written to in a rotor fashion, sweeping writes through
3607 * available space then repeating.
3608 *
3609 * 7. The L2ARC does not store dirty content. It never needs to flush
3610 * write buffers back to disk based storage.
3611 *
3612 * 8. If an ARC buffer is written (and dirtied) which also exists in the
3613 * L2ARC, the now stale L2ARC buffer is immediately dropped.
3614 *
3615 * The performance of the L2ARC can be tweaked by a number of tunables;
3616 * adjusting them may be necessary for different workloads:
3617 *
3618 * l2arc_write_max max write bytes per interval
3619 * l2arc_write_boost extra write bytes during device warmup
3620 * l2arc_noprefetch skip caching prefetched buffers
3621 * l2arc_headroom number of max device writes to precache
3622 * l2arc_feed_secs seconds between L2ARC writing
3623 *
3624 * Tunables may be removed or added as future performance improvements are
3625 * integrated, and also may become zpool properties.
3626 */
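
/*
 * A minimal sketch of one feed interval, condensed from
 * l2arc_feed_thread() below (locking, CPR handling, and error paths
 * omitted, so this is an illustration rather than the authoritative
 * sequence). The write size is l2arc_write_max, boosted by
 * l2arc_write_boost until the ARC is warm:
 *
 *	(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
 *	    lbolt + (hz * l2arc_feed_secs));
 *	dev = l2arc_dev_get_next();
 *	size = dev->l2ad_write;
 *	if (arc_warm == B_FALSE)
 *		size += dev->l2ad_boost;
 *	l2arc_evict(dev, size, B_FALSE);
 *	l2arc_write_buffers(dev->l2ad_spa, dev, size);
 *	spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
 */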
3627
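/*
 * Move an ARC header's accounting from arcstat_hdr_size to
 * arcstat_l2_hdr_size (which also includes the L2ARC header itself);
 * l2arc_hdr_stat_remove() below reverses the move.
 */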
3628 static void
3629 l2arc_hdr_stat_add(void)
3630 {
3631 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
3632 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
3633 }
3634
3635 static void
3636 l2arc_hdr_stat_remove(void)
3637 {
3638 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
3639 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
3640 }
3641
3642 /*
3643 * Cycle through L2ARC devices. This is how L2ARC load balances.
3644 * If a device is returned, it is returned with the spa config lock held.
3645 */
3646 static l2arc_dev_t *
3647 l2arc_dev_get_next(void)
3648 {
3649 l2arc_dev_t *first, *next = NULL;
3650
3651 /*
3652 * Lock out the removal of spas (spa_namespace_lock), then removal
3653 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
3654 * both locks will be dropped and a spa config lock held instead.
3655 */
3656 mutex_enter(&spa_namespace_lock);
3657 mutex_enter(&l2arc_dev_mtx);
3658
3659 /* if there are no vdevs, there is nothing to do */
3660 if (l2arc_ndev == 0)
3661 goto out;
3662
3663 first = NULL;
3664 next = l2arc_dev_last;
3665 do {
3666 /* loop around the list looking for a non-faulted vdev */
3667 if (next == NULL) {
3668 next = list_head(l2arc_dev_list);
3669 } else {
3670 next = list_next(l2arc_dev_list, next);
3671 if (next == NULL)
3672 next = list_head(l2arc_dev_list);
3673 }
3674
3675 /* if we have come back to the start, bail out */
3676 if (first == NULL)
3677 first = next;
3678 else if (next == first)
3679 break;
3680
3681 } while (vdev_is_dead(next->l2ad_vdev));
3682
3683 /* if we were unable to find any usable vdevs, return NULL */
3684 if (vdev_is_dead(next->l2ad_vdev))
3685 next = NULL;
3686
3687 l2arc_dev_last = next;
3688
3689 out:
3690 mutex_exit(&l2arc_dev_mtx);
3691
3692 /*
3693 * Grab the config lock to prevent the 'next' device from being
3694 * removed while we are writing to it.
3695 */
3696 if (next != NULL)
3697 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
3698 mutex_exit(&spa_namespace_lock);
3699
3700 return (next);
3701 }
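
/*
 * Illustrative caller pattern (l2arc_feed_thread() below does exactly
 * this): a returned device is protected by the spa config lock, which
 * the caller must drop once it has finished writing:
 *
 *	if ((dev = l2arc_dev_get_next()) != NULL) {
 *		... issue writes to dev ...
 *		spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
 *	}
 */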
3702
3703 /*
3704 * Free buffers that were tagged for destruction.
3705 */
3706 static void
3707 l2arc_do_free_on_write(void)
3708 {
3709 list_t *buflist;
3710 l2arc_data_free_t *df, *df_prev;
3711
3712 mutex_enter(&l2arc_free_on_write_mtx);
3713 buflist = l2arc_free_on_write;
3714
3715 for (df = list_tail(buflist); df; df = df_prev) {
3716 df_prev = list_prev(buflist, df);
3717 ASSERT(df->l2df_data != NULL);
3718 ASSERT(df->l2df_func != NULL);
3719 df->l2df_func(df->l2df_data, df->l2df_size);
3720 list_remove(buflist, df);
3721 kmem_free(df, sizeof (l2arc_data_free_t));
3722 }
3723
3724 mutex_exit(&l2arc_free_on_write_mtx);
3725 }
3726
3727 /*
3728 * A write to a cache device has completed. Update all headers to allow
3729 * reads from these buffers to begin.
3730 */
3731 static void
3732 l2arc_write_done(zio_t *zio)
3733 {
3734 l2arc_write_callback_t *cb;
3735 l2arc_dev_t *dev;
3736 list_t *buflist;
3737 arc_buf_hdr_t *head, *ab, *ab_prev;
3738 l2arc_buf_hdr_t *abl2;
3739 kmutex_t *hash_lock;
3740
3741 cb = zio->io_private;
3742 ASSERT(cb != NULL);
3743 dev = cb->l2wcb_dev;
3744 ASSERT(dev != NULL);
3745 head = cb->l2wcb_head;
3746 ASSERT(head != NULL);
3747 buflist = dev->l2ad_buflist;
3748 ASSERT(buflist != NULL);
3749 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
3750 l2arc_write_callback_t *, cb);
3751
3752 if (zio->io_error != 0)
3753 ARCSTAT_BUMP(arcstat_l2_writes_error);
3754
3755 mutex_enter(&l2arc_buflist_mtx);
3756
3757 /*
3758 * All writes completed, or an error was hit.
3759 */
3760 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
3761 ab_prev = list_prev(buflist, ab);
3762
3763 hash_lock = HDR_LOCK(ab);
3764 if (!mutex_tryenter(hash_lock)) {
3765 /*
3766 * This buffer misses out. It may be in a stage
3767 * of eviction. Its ARC_L2_WRITING flag will be
3768 * left set, denying reads to this buffer.
3769 */
3770 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
3771 continue;
3772 }
3773
3774 if (zio->io_error != 0) {
3775 /*
3776 * Error - drop L2ARC entry.
3777 */
3778 list_remove(buflist, ab);
3779 abl2 = ab->b_l2hdr;
3780 ab->b_l2hdr = NULL;
3781 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
3782 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
3783 }
3784
3785 /*
3786 * Allow ARC to begin reads to this L2ARC entry.
3787 */
3788 ab->b_flags &= ~ARC_L2_WRITING;
3789
3790 mutex_exit(hash_lock);
3791 }
3792
3793 atomic_inc_64(&l2arc_writes_done);
3794 list_remove(buflist, head);
3795 kmem_cache_free(hdr_cache, head);
3796 mutex_exit(&l2arc_buflist_mtx);
3797
3798 l2arc_do_free_on_write();
3799
3800 kmem_free(cb, sizeof (l2arc_write_callback_t));
3801 }
3802
3803 /*
3804 * A read to a cache device has completed. Validate buffer contents before
3805 * handing over to the regular ARC routines.
3806 */
3807 static void
3808 l2arc_read_done(zio_t *zio)
3809 {
3810 l2arc_read_callback_t *cb;
3811 arc_buf_hdr_t *hdr;
3812 arc_buf_t *buf;
3813 kmutex_t *hash_lock;
3814 int equal;
3815
3816 ASSERT(zio->io_vd != NULL);
3817 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
3818
3819 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
3820
3821 cb = zio->io_private;
3822 ASSERT(cb != NULL);
3823 buf = cb->l2rcb_buf;
3824 ASSERT(buf != NULL);
3825 hdr = buf->b_hdr;
3826 ASSERT(hdr != NULL);
3827
3828 hash_lock = HDR_LOCK(hdr);
3829 mutex_enter(hash_lock);
3830
3831 /*
3832 * Check this survived the L2ARC journey.
3833 */
3834 equal = arc_cksum_equal(buf);
3835 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
3836 mutex_exit(hash_lock);
3837 zio->io_private = buf;
3838 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
3839 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
3840 arc_read_done(zio);
3841 } else {
3842 mutex_exit(hash_lock);
3843 /*
3844 * Buffer didn't survive caching. Increment stats and
3845 * reissue to the original storage device.
3846 */
3847 if (zio->io_error != 0) {
3848 ARCSTAT_BUMP(arcstat_l2_io_error);
3849 } else {
3850 zio->io_error = EIO;
3851 }
3852 if (!equal)
3853 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
3854
3855 /*
3856 * If there's no waiter, issue an async i/o to the primary
3857 * storage now. If there *is* a waiter, the caller must
3858 * issue the i/o in a context where it's OK to block.
3859 */
3860 if (zio->io_waiter == NULL)
3861 zio_nowait(zio_read(zio->io_parent,
3862 cb->l2rcb_spa, &cb->l2rcb_bp,
3863 buf->b_data, zio->io_size, arc_read_done, buf,
3864 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
3865 }
3866
3867 kmem_free(cb, sizeof (l2arc_read_callback_t));
3868 }
3869
3870 /*
3871 * This is the list priority from which the L2ARC will search for pages to
3872 * cache. This is used within loops (0..3) to cycle through lists in the
3873 * desired order. This order can have a significant effect on cache
3874 * performance.
3875 *
3876 * Currently the metadata lists are hit first, MFU then MRU, followed by
3877 * the data lists. This function returns a locked list, and also returns
3878 * the lock pointer.
3879 */
3880 static list_t *
3881 l2arc_list_locked(int list_num, kmutex_t **lock)
3882 {
3883 list_t *list;
3884
3885 ASSERT(list_num >= 0 && list_num <= 3);
3886
3887 switch (list_num) {
3888 case 0:
3889 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
3890 *lock = &arc_mfu->arcs_mtx;
3891 break;
3892 case 1:
3893 list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
3894 *lock = &arc_mru->arcs_mtx;
3895 break;
3896 case 2:
3897 list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
3898 *lock = &arc_mfu->arcs_mtx;
3899 break;
3900 case 3:
3901 list = &arc_mru->arcs_list[ARC_BUFC_DATA];
3902 *lock = &arc_mru->arcs_mtx;
3903 break;
3904 }
3905
3906 ASSERT(!(MUTEX_HELD(*lock)));
3907 mutex_enter(*lock);
3908 return (list);
3909 }
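
/*
 * Example caller pattern (condensed from l2arc_write_buffers() below):
 * each pass locks one list, scans it, and then drops the returned lock:
 *
 *	for (try = 0; try <= 3; try++) {
 *		list = l2arc_list_locked(try, &list_lock);
 *		... scan candidate buffers on list ...
 *		mutex_exit(list_lock);
 *	}
 */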
3910
3911 /*
3912 * Evict buffers from the device write hand to the distance specified in
3913 * bytes. This distance may span populated buffers, or it may span nothing.
3914 * This clears a region of the L2ARC device, making it ready for writing.
3915 * If the 'all' boolean is set, every buffer is evicted.
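*
* For example (sizes hypothetical): with an 8 Mbyte distance and the
* write hand more than 16 Mbytes (2 * distance) from l2ad_end, buffers
* in [l2ad_hand, l2ad_hand + 8 Mbytes) are evicted; within 16 Mbytes of
* the end, eviction instead runs all the way to l2ad_end so the write
* hand can wrap cleanly back to l2ad_start.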
3916 */
3917 static void
3918 l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
3919 {
3920 list_t *buflist;
3921 l2arc_buf_hdr_t *abl2;
3922 arc_buf_hdr_t *ab, *ab_prev;
3923 kmutex_t *hash_lock;
3924 uint64_t taddr;
3925
3926 buflist = dev->l2ad_buflist;
3927
3928 if (buflist == NULL)
3929 return;
3930
3931 if (!all && dev->l2ad_first) {
3932 /*
3933 * This is the first sweep through the device. There is
3934 * nothing to evict.
3935 */
3936 return;
3937 }
3938
3939 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
3940 /*
3941 * When nearing the end of the device, evict to the end
3942 * before the device write hand jumps to the start.
3943 */
3944 taddr = dev->l2ad_end;
3945 } else {
3946 taddr = dev->l2ad_hand + distance;
3947 }
3948 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
3949 uint64_t, taddr, boolean_t, all);
3950
3951 top:
3952 mutex_enter(&l2arc_buflist_mtx);
3953 for (ab = list_tail(buflist); ab; ab = ab_prev) {
3954 ab_prev = list_prev(buflist, ab);
3955
3956 hash_lock = HDR_LOCK(ab);
3957 if (!mutex_tryenter(hash_lock)) {
3958 /*
3959 * Missed the hash lock. Retry.
3960 */
3961 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
3962 mutex_exit(&l2arc_buflist_mtx);
3963 mutex_enter(hash_lock);
3964 mutex_exit(hash_lock);
3965 goto top;
3966 }
3967
3968 if (HDR_L2_WRITE_HEAD(ab)) {
3969 /*
3970 * We hit a write head node. Leave it for
3971 * l2arc_write_done().
3972 */
3973 list_remove(buflist, ab);
3974 mutex_exit(hash_lock);
3975 continue;
3976 }
3977
3978 if (!all && ab->b_l2hdr != NULL &&
3979 (ab->b_l2hdr->b_daddr > taddr ||
3980 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
3981 /*
3982 * We've evicted to the target address,
3983 * or the end of the device.
3984 */
3985 mutex_exit(hash_lock);
3986 break;
3987 }
3988
3989 if (HDR_FREE_IN_PROGRESS(ab)) {
3990 /*
3991 * Already on the path to destruction.
3992 */
3993 mutex_exit(hash_lock);
3994 continue;
3995 }
3996
3997 if (ab->b_state == arc_l2c_only) {
3998 ASSERT(!HDR_L2_READING(ab));
3999 /*
4000 * This doesn't exist in the ARC. Destroy.
4001 * arc_hdr_destroy() will call list_remove()
4002 * and decrement arcstat_l2_size.
4003 */
4004 arc_change_state(arc_anon, ab, hash_lock);
4005 arc_hdr_destroy(ab);
4006 } else {
4007 /*
4008 * Invalidate issued or about to be issued
4009 * reads, since we may be about to write
4010 * over this location.
4011 */
4012 if (HDR_L2_READING(ab)) {
4013 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4014 ab->b_flags |= ARC_L2_EVICTED;
4015 }
4016
4017 /*
4018 * Tell ARC this no longer exists in L2ARC.
4019 */
4020 if (ab->b_l2hdr != NULL) {
4021 abl2 = ab->b_l2hdr;
4022 ab->b_l2hdr = NULL;
4023 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4024 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4025 }
4026 list_remove(buflist, ab);
4027
4028 /*
4029 * This may have been leftover after a
4030 * failed write.
4031 */
4032 ab->b_flags &= ~ARC_L2_WRITING;
4033 }
4034 mutex_exit(hash_lock);
4035 }
4036 mutex_exit(&l2arc_buflist_mtx);
4037
4038 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
4039 dev->l2ad_evict = taddr;
4040 }
4041
4042 /*
4043 * Find and write ARC buffers to the L2ARC device.
4044 *
4045 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4046 * for reading until they have completed writing.
4047 */
4048 static void
4049 l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4050 {
4051 arc_buf_hdr_t *ab, *ab_prev, *head;
4052 l2arc_buf_hdr_t *hdrl2;
4053 list_t *list;
4054 uint64_t passed_sz, write_sz, buf_sz, headroom;
4055 void *buf_data;
4056 kmutex_t *hash_lock, *list_lock;
4057 boolean_t have_lock, full;
4058 l2arc_write_callback_t *cb;
4059 zio_t *pio, *wzio;
4060
4061 ASSERT(dev->l2ad_vdev != NULL);
4062
4063 pio = NULL;
4064 write_sz = 0;
4065 full = B_FALSE;
4066 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4067 head->b_flags |= ARC_L2_WRITE_HEAD;
4068
4069 /*
4070 * Copy buffers for L2ARC writing.
4071 */
4072 mutex_enter(&l2arc_buflist_mtx);
4073 for (int try = 0; try <= 3; try++) {
4074 list = l2arc_list_locked(try, &list_lock);
4075 passed_sz = 0;
4076
4077 /*
4078 * L2ARC fast warmup.
4079 *
4080 * Until the ARC is warm and starts to evict, read from the
4081 * head of the ARC lists rather than the tail.
4082 */
4083 headroom = target_sz * l2arc_headroom;
4084 if (arc_warm == B_FALSE)
4085 ab = list_head(list);
4086 else
4087 ab = list_tail(list);
4088
4089 for (; ab; ab = ab_prev) {
4090 if (arc_warm == B_FALSE)
4091 ab_prev = list_next(list, ab);
4092 else
4093 ab_prev = list_prev(list, ab);
4094
4095 hash_lock = HDR_LOCK(ab);
4096 have_lock = MUTEX_HELD(hash_lock);
4097 if (!have_lock && !mutex_tryenter(hash_lock)) {
4098 /*
4099 * Skip this buffer rather than waiting.
4100 */
4101 continue;
4102 }
4103
4104 passed_sz += ab->b_size;
4105 if (passed_sz > headroom) {
4106 /*
4107 * Searched too far.
4108 */
4109 mutex_exit(hash_lock);
4110 break;
4111 }
4112
4113 if (ab->b_spa != spa) {
4114 mutex_exit(hash_lock);
4115 continue;
4116 }
4117
4118 if (ab->b_l2hdr != NULL) {
4119 /*
4120 * Already in L2ARC.
4121 */
4122 mutex_exit(hash_lock);
4123 continue;
4124 }
4125
4126 if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) {
4127 mutex_exit(hash_lock);
4128 continue;
4129 }
4130
4131 if ((write_sz + ab->b_size) > target_sz) {
4132 full = B_TRUE;
4133 mutex_exit(hash_lock);
4134 break;
4135 }
4136
4137 if (ab->b_buf == NULL) {
4138 DTRACE_PROBE1(l2arc__buf__null, void *, ab);
4139 mutex_exit(hash_lock);
4140 continue;
4141 }
4142
4143 if (pio == NULL) {
4144 /*
4145 * Insert a dummy header on the buflist so
4146 * l2arc_write_done() can find where the
4147 * write buffers begin without searching.
4148 */
4149 list_insert_head(dev->l2ad_buflist, head);
4150
4151 cb = kmem_alloc(
4152 sizeof (l2arc_write_callback_t), KM_SLEEP);
4153 cb->l2wcb_dev = dev;
4154 cb->l2wcb_head = head;
4155 pio = zio_root(spa, l2arc_write_done, cb,
4156 ZIO_FLAG_CANFAIL);
4157 }
4158
4159 /*
4160 * Create and add a new L2ARC header.
4161 */
4162 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4163 hdrl2->b_dev = dev;
4164 hdrl2->b_daddr = dev->l2ad_hand;
4165
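/*
 * Mark the buffer as in-flight to the L2ARC: arc_read() will
 * not serve it from the L2ARC until l2arc_write_done() clears
 * this flag after the device write completes.
 */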
4166 ab->b_flags |= ARC_L2_WRITING;
4167 ab->b_l2hdr = hdrl2;
4168 list_insert_head(dev->l2ad_buflist, ab);
4169 buf_data = ab->b_buf->b_data;
4170 buf_sz = ab->b_size;
4171
4172 /*
4173 * Compute and store the buffer cksum before
4174 * writing. On debug the cksum is verified first.
4175 */
4176 arc_cksum_verify(ab->b_buf);
4177 arc_cksum_compute(ab->b_buf, B_TRUE);
4178
4179 mutex_exit(hash_lock);
4180
4181 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4182 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4183 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4184 ZIO_FLAG_CANFAIL, B_FALSE);
4185
4186 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4187 zio_t *, wzio);
4188 (void) zio_nowait(wzio);
4189
4190 /*
4191 * Keep the clock hand suitably device-aligned.
4192 */
4193 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4194
4195 write_sz += buf_sz;
4196 dev->l2ad_hand += buf_sz;
4197 }
4198
4199 mutex_exit(list_lock);
4200
4201 if (full == B_TRUE)
4202 break;
4203 }
4204 mutex_exit(&l2arc_buflist_mtx);
4205
4206 if (pio == NULL) {
4207 ASSERT3U(write_sz, ==, 0);
4208 kmem_cache_free(hdr_cache, head);
4209 return;
4210 }
4211
4212 ASSERT3U(write_sz, <=, target_sz);
4213 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4214 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4215 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);
4216
4217 /*
4218 * Bump device hand to the device start if it is approaching the end.
4219 * l2arc_evict() will already have evicted ahead for this case.
4220 */
4221 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4222 spa_l2cache_space_update(dev->l2ad_vdev, 0,
4223 dev->l2ad_end - dev->l2ad_hand);
4224 dev->l2ad_hand = dev->l2ad_start;
4225 dev->l2ad_evict = dev->l2ad_start;
4226 dev->l2ad_first = B_FALSE;
4227 }
4228
4229 (void) zio_wait(pio);
4230 }
4231
4232 /*
4233 * This thread feeds the L2ARC at regular intervals. This is the beating
4234 * heart of the L2ARC.
4235 */
4236 static void
4237 l2arc_feed_thread(void)
4238 {
4239 callb_cpr_t cpr;
4240 l2arc_dev_t *dev;
4241 spa_t *spa;
4242 uint64_t size;
4243
4244 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4245
4246 mutex_enter(&l2arc_feed_thr_lock);
4247
4248 while (l2arc_thread_exit == 0) {
4249 /*
4250 * Pause for l2arc_feed_secs seconds between writes.
4251 */
4252 CALLB_CPR_SAFE_BEGIN(&cpr);
4253 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
4254 lbolt + (hz * l2arc_feed_secs));
4255 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
4256
4257 /*
4258 * Quick check for L2ARC devices.
4259 */
4260 mutex_enter(&l2arc_dev_mtx);
4261 if (l2arc_ndev == 0) {
4262 mutex_exit(&l2arc_dev_mtx);
4263 continue;
4264 }
4265 mutex_exit(&l2arc_dev_mtx);
4266
4267 /*
4268 * This selects the next l2arc device to write to, and in
4269 * doing so the next spa to feed from: dev->l2ad_spa. This
4270 * will return NULL if there are now no l2arc devices or if
4271 * they are all faulted.
4272 *
4273 * If a device is returned, its spa's config lock is also
4274 * held to prevent device removal. l2arc_dev_get_next()
4275 * will grab and release l2arc_dev_mtx.
4276 */
4277 if ((dev = l2arc_dev_get_next()) == NULL)
4278 continue;
4279
4280 spa = dev->l2ad_spa;
4281 ASSERT(spa != NULL);
4282
4283 /*
4284 * Avoid contributing to memory pressure.
4285 */
4286 if (arc_reclaim_needed()) {
4287 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4288 spa_config_exit(spa, SCL_L2ARC, dev);
4289 continue;
4290 }
4291
4292 ARCSTAT_BUMP(arcstat_l2_feeds);
4293
4294 size = dev->l2ad_write;
4295 if (arc_warm == B_FALSE)
4296 size += dev->l2ad_boost;
4297
4298 /*
4299 * Evict L2ARC buffers that will be overwritten.
4300 */
4301 l2arc_evict(dev, size, B_FALSE);
4302
4303 /*
4304 * Write ARC buffers.
4305 */
4306 l2arc_write_buffers(spa, dev, size);
4307 spa_config_exit(spa, SCL_L2ARC, dev);
4308 }
4309
4310 l2arc_thread_exit = 0;
4311 cv_broadcast(&l2arc_feed_thr_cv);
4312 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
4313 thread_exit();
4314 }
4315
4316 boolean_t
4317 l2arc_vdev_present(vdev_t *vd)
4318 {
4319 l2arc_dev_t *dev;
4320
4321 mutex_enter(&l2arc_dev_mtx);
4322 for (dev = list_head(l2arc_dev_list); dev != NULL;
4323 dev = list_next(l2arc_dev_list, dev)) {
4324 if (dev->l2ad_vdev == vd)
4325 break;
4326 }
4327 mutex_exit(&l2arc_dev_mtx);
4328
4329 return (dev != NULL);
4330 }
4331
4332 /*
4333 * Add a vdev for use by the L2ARC. By this point the spa has already
4334 * validated the vdev and opened it.
4335 */
4336 void
4337 l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
4338 {
4339 l2arc_dev_t *adddev;
4340
4341 ASSERT(!l2arc_vdev_present(vd));
4342
4343 /*
4344 * Create a new l2arc device entry.
4345 */
4346 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
4347 adddev->l2ad_spa = spa;
4348 adddev->l2ad_vdev = vd;
4349 adddev->l2ad_write = l2arc_write_max;
4350 adddev->l2ad_boost = l2arc_write_boost;
4351 adddev->l2ad_start = start;
4352 adddev->l2ad_end = end;
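/*
 * The write hand and the eviction frontier both begin at the start of
 * the device; l2ad_first notes that nothing needs evicting on the
 * first sweep (see l2arc_evict()).
 */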
4353 adddev->l2ad_hand = adddev->l2ad_start;
4354 adddev->l2ad_evict = adddev->l2ad_start;
4355 adddev->l2ad_first = B_TRUE;
4356 ASSERT3U(adddev->l2ad_write, >, 0);
4357
4358 /*
4359 * This is a list of all ARC buffers that are still valid on the
4360 * device.
4361 */
4362 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
4363 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
4364 offsetof(arc_buf_hdr_t, b_l2node));
4365
4366 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);
4367
4368 /*
4369 * Add device to global list
4370 */
4371 mutex_enter(&l2arc_dev_mtx);
4372 list_insert_head(l2arc_dev_list, adddev);
4373 atomic_inc_64(&l2arc_ndev);
4374 mutex_exit(&l2arc_dev_mtx);
4375 }
4376
4377 /*
4378 * Remove a vdev from the L2ARC.
4379 */
4380 void
4381 l2arc_remove_vdev(vdev_t *vd)
4382 {
4383 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
4384
4385 /*
4386 * Find the device by vdev
4387 */
4388 mutex_enter(&l2arc_dev_mtx);
4389 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
4390 nextdev = list_next(l2arc_dev_list, dev);
4391 if (vd == dev->l2ad_vdev) {
4392 remdev = dev;
4393 break;
4394 }
4395 }
4396 ASSERT(remdev != NULL);
4397
4398 /*
4399 * Remove device from global list
4400 */
4401 list_remove(l2arc_dev_list, remdev);
4402 l2arc_dev_last = NULL; /* may have been invalidated */
4403 atomic_dec_64(&l2arc_ndev);
4404 mutex_exit(&l2arc_dev_mtx);
4405
4406 /*
4407 * Clear all buflists and ARC references. L2ARC device flush.
4408 */
4409 l2arc_evict(remdev, 0, B_TRUE);
4410 list_destroy(remdev->l2ad_buflist);
4411 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
4412 kmem_free(remdev, sizeof (l2arc_dev_t));
4413 }
4414
4415 void
4416 l2arc_init(void)
4417 {
4418 l2arc_thread_exit = 0;
4419 l2arc_ndev = 0;
4420 l2arc_writes_sent = 0;
4421 l2arc_writes_done = 0;
4422
4423 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4424 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
4425 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
4426 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
4427 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
4428
4429 l2arc_dev_list = &L2ARC_dev_list;
4430 l2arc_free_on_write = &L2ARC_free_on_write;
4431 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
4432 offsetof(l2arc_dev_t, l2ad_node));
4433 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
4434 offsetof(l2arc_data_free_t, l2df_list_node));
4435 }
4436
4437 void
4438 l2arc_fini(void)
4439 {
4440 /*
4441 * This is called from dmu_fini(), which is called from spa_fini().
4442 * Because of this, we can assume that all l2arc devices have
4443 * already been removed when the pools themselves were removed.
4444 */
4445
4446 l2arc_do_free_on_write();
4447
4448 mutex_destroy(&l2arc_feed_thr_lock);
4449 cv_destroy(&l2arc_feed_thr_cv);
4450 mutex_destroy(&l2arc_dev_mtx);
4451 mutex_destroy(&l2arc_buflist_mtx);
4452 mutex_destroy(&l2arc_free_on_write_mtx);
4453
4454 list_destroy(l2arc_dev_list);
4455 list_destroy(l2arc_free_on_write);
4456 }
4457
4458 void
4459 l2arc_start(void)
4460 {
4461 if (!(spa_mode_global & FWRITE))
4462 return;
4463
4464 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
4465 TS_RUN, minclsyspri);
4466 }
4467
4468 void
4469 l2arc_stop(void)
4470 {
4471 if (!(spa_mode_global & FWRITE))
4472 return;
4473
4474 mutex_enter(&l2arc_feed_thr_lock);
4475 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
4476 l2arc_thread_exit = 1;
4477 while (l2arc_thread_exit != 0)
4478 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
4479 mutex_exit(&l2arc_feed_thr_lock);
4480 }