/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
 * Copyright (c) 2019, Klara Inc.
 * Copyright (c) 2019, Allan Jude
 */

#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_zfs.h>
#include <sys/callb.h>
#include <sys/abd.h>
#include <sys/vdev.h>
#include <cityhash.h>
#include <sys/spa_impl.h>
#include <sys/wmsum.h>

kstat_t *dbuf_ksp;

typedef struct dbuf_stats {
	/*
	 * Various statistics about the size of the dbuf cache.
	 */
	kstat_named_t cache_count;
	kstat_named_t cache_size_bytes;
	kstat_named_t cache_size_bytes_max;
	/*
	 * Statistics regarding the bounds on the dbuf cache size.
	 */
	kstat_named_t cache_target_bytes;
	kstat_named_t cache_lowater_bytes;
	kstat_named_t cache_hiwater_bytes;
	/*
	 * Total number of dbuf cache evictions that have occurred.
	 */
	kstat_named_t cache_total_evicts;
	/*
	 * The distribution of dbuf levels in the dbuf cache and
	 * the total size of all dbufs at each level.
	 */
	kstat_named_t cache_levels[DN_MAX_LEVELS];
	kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
	/*
	 * Statistics about the dbuf hash table.
	 */
	kstat_named_t hash_hits;
	kstat_named_t hash_misses;
	kstat_named_t hash_collisions;
	kstat_named_t hash_elements;
	kstat_named_t hash_elements_max;
	/*
	 * Number of sublists containing more than one dbuf in the dbuf
	 * hash table. We also keep track of the longest hash chain.
	 */
	kstat_named_t hash_chains;
	kstat_named_t hash_chain_max;
	/*
	 * Number of times dbuf_create() discovers that a dbuf was
	 * already created and present in the dbuf hash table.
	 */
	kstat_named_t hash_insert_race;
	/*
	 * Statistics about the size of the metadata dbuf cache.
	 */
	kstat_named_t metadata_cache_count;
	kstat_named_t metadata_cache_size_bytes;
	kstat_named_t metadata_cache_size_bytes_max;
	/*
	 * For diagnostic purposes, this is incremented whenever we can't add
	 * something to the metadata cache because it's full, and instead put
	 * the data in the regular dbuf cache.
	 */
	kstat_named_t metadata_cache_overflow;
} dbuf_stats_t;

dbuf_stats_t dbuf_stats = {
	{ "cache_count",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_size_bytes_max",		KSTAT_DATA_UINT64 },
	{ "cache_target_bytes",			KSTAT_DATA_UINT64 },
	{ "cache_lowater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_hiwater_bytes",		KSTAT_DATA_UINT64 },
	{ "cache_total_evicts",			KSTAT_DATA_UINT64 },
	{ { "cache_levels_N",			KSTAT_DATA_UINT64 } },
	{ { "cache_levels_bytes_N",		KSTAT_DATA_UINT64 } },
	{ "hash_hits",				KSTAT_DATA_UINT64 },
	{ "hash_misses",			KSTAT_DATA_UINT64 },
	{ "hash_collisions",			KSTAT_DATA_UINT64 },
	{ "hash_elements",			KSTAT_DATA_UINT64 },
	{ "hash_elements_max",			KSTAT_DATA_UINT64 },
	{ "hash_chains",			KSTAT_DATA_UINT64 },
	{ "hash_chain_max",			KSTAT_DATA_UINT64 },
	{ "hash_insert_race",			KSTAT_DATA_UINT64 },
	{ "metadata_cache_count",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes",		KSTAT_DATA_UINT64 },
	{ "metadata_cache_size_bytes_max",	KSTAT_DATA_UINT64 },
	{ "metadata_cache_overflow",		KSTAT_DATA_UINT64 }
};
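
/*
 * Note: these stats are exported read-only through the kstat framework
 * (see dbuf_kstat_update() below). On Linux builds of OpenZFS they can
 * typically be inspected from userland, e.g.:
 *
 *	cat /proc/spl/kstat/zfs/dbufstats
 *
 * (path given for illustration; it depends on the platform's kstat
 * support).
 */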

struct {
	wmsum_t cache_count;
	wmsum_t cache_total_evicts;
	wmsum_t cache_levels[DN_MAX_LEVELS];
	wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
	wmsum_t hash_hits;
	wmsum_t hash_misses;
	wmsum_t hash_collisions;
	wmsum_t hash_chains;
	wmsum_t hash_insert_race;
	wmsum_t metadata_cache_count;
	wmsum_t metadata_cache_overflow;
} dbuf_sums;

#define	DBUF_STAT_INCR(stat, val)	\
	wmsum_add(&dbuf_sums.stat, val);
#define	DBUF_STAT_DECR(stat, val)	\
	DBUF_STAT_INCR(stat, -(val));
#define	DBUF_STAT_BUMP(stat)		\
	DBUF_STAT_INCR(stat, 1);
#define	DBUF_STAT_BUMPDOWN(stat)	\
	DBUF_STAT_INCR(stat, -1);
#define	DBUF_STAT_MAX(stat, v) {	\
	uint64_t _m;			\
	while ((v) > (_m = dbuf_stats.stat.value.ui64) &&	\
	    (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
		continue;		\
}
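
/*
 * DBUF_STAT_MAX() above is a lock-free maximum update: it re-reads the
 * current value and retries the compare-and-swap until either the stored
 * maximum is already >= v or our CAS wins. A minimal equivalent sketch
 * (illustrative only, not part of the build):
 *
 *	do {
 *		m = max;
 *	} while (v > m && atomic_cas_64(&max, m, v) != m);
 */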

static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);

extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
    dmu_buf_evict_func_t *evict_func_sync,
    dmu_buf_evict_func_t *evict_func_async,
    dmu_buf_t **clear_on_evict_dbufp);

/*
 * Global data structures and functions for the dbuf cache.
 */
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;

static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;

/*
 * There are two dbuf caches; each dbuf can only be in one of them at a time.
 *
 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
 *    from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
 *    that represent the metadata that describes filesystems/snapshots/
 *    bookmarks/properties/etc. We only evict from this cache when we export a
 *    pool, to short-circuit as much I/O as possible for all administrative
 *    commands that need the metadata. There is no eviction policy for this
 *    cache, because we try to only include types in it which would occupy a
 *    very small amount of space per object but create a large impact on the
 *    performance of these commands. Instead, after it reaches a maximum size
 *    (which should only happen on very small memory systems with a very large
 *    number of filesystem objects), we stop taking new dbufs into the
 *    metadata cache, instead putting them in the normal dbuf cache.
 *
 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
 *    are not currently held but have been recently released. These dbufs
 *    are not eligible for arc eviction until they are aged out of the cache.
 *    Dbufs that are aged out of the cache will be immediately destroyed and
 *    become eligible for arc eviction.
 *
 * Dbufs are added to these caches once the last hold is released. If a dbuf is
 * later accessed and still exists in the dbuf cache, then it will be removed
 * from the cache and later re-added to the head of the cache.
 *
 * If a given dbuf meets the requirements for the metadata cache, it will go
 * there, otherwise it will be considered for the generic LRU dbuf cache. The
 * caches and the refcounts tracking their sizes are stored in an array indexed
 * by those caches' matching enum values (from dbuf_cached_state_t).
 */
typedef struct dbuf_cache {
	multilist_t cache;
	zfs_refcount_t size ____cacheline_aligned;
} dbuf_cache_t;
dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
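
/*
 * Illustrative only: the current size of either cache can be read with
 * zfs_refcount_count(), indexing this array by dbuf_cached_state_t, e.g.
 *
 *	uint64_t sz = zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
 *
 * as done by the eviction and kstat code below.
 */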

/* Size limits for the caches */
unsigned long dbuf_cache_max_bytes = ULONG_MAX;
unsigned long dbuf_metadata_cache_max_bytes = ULONG_MAX;

/* Set the default sizes of the caches to log2 fraction of arc size */
int dbuf_cache_shift = 5;
int dbuf_metadata_cache_shift = 6;

static unsigned long dbuf_cache_target_bytes(void);
static unsigned long dbuf_metadata_cache_target_bytes(void);

/*
 * The LRU dbuf cache uses a three-stage eviction policy:
 *	- A low water marker designates when the dbuf eviction thread
 *	should stop evicting from the dbuf cache.
 *	- When we reach the maximum size (aka mid water mark), we
 *	signal the eviction thread to run.
 *	- The high water mark indicates when the eviction thread
 *	is unable to keep up with the incoming load and eviction must
 *	happen in the context of the calling thread.
 *
 * The dbuf cache:
 *                                                 (max size)
 *                                      low water   mid water   hi water
 * +----------------------------------------+----------+----------+
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * |                                        |          |          |
 * +----------------------------------------+----------+----------+
 *                                        stop        signal     evict
 *                                      evicting     eviction   directly
 *                                                    thread
 *
 * The high and low water marks indicate the operating range for the eviction
 * thread. The low water mark is, by default, 90% of the total size of the
 * cache and the high water mark is at 110% (both of these percentages can be
 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
 * respectively). The eviction thread will try to ensure that the cache remains
 * within this range by waking up every second and checking if the cache is
 * above the low water mark. The thread can also be woken up by callers adding
 * elements into the cache if the cache is larger than the mid water (i.e. max
 * cache size). Once the eviction thread is woken up and eviction is required,
 * it will continue evicting buffers until it's able to reduce the cache size
 * to the low water mark. If the cache size continues to grow and hits the high
 * water mark, then callers adding elements to the cache will begin to evict
 * directly from the cache until the cache is no longer above the high water
 * mark.
 */
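
/*
 * Worked example with the defaults below (dbuf_cache_hiwater_pct = 10,
 * dbuf_cache_lowater_pct = 10): if dbuf_cache_target_bytes() returns
 * 100MB, the mid water mark is 100MB, the high water mark is
 * 100MB + (100MB * 10) / 100 = 110MB, and the low water mark is
 * 100MB - (100MB * 10) / 100 = 90MB; see dbuf_cache_hiwater_bytes() and
 * dbuf_cache_lowater_bytes() below.
 */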

/*
 * The percentage above and below the maximum cache size.
 */
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;

/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
	dmu_buf_impl_t *db = vdb;
	bzero(db, sizeof (dmu_buf_impl_t));

	mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
	rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
	cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
	multilist_link_init(&db->db_cache_link);
	zfs_refcount_create(&db->db_holds);

	return (0);
}

/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
	dmu_buf_impl_t *db = vdb;
	mutex_destroy(&db->db_mtx);
	rw_destroy(&db->db_rwlock);
	cv_destroy(&db->db_changed);
	ASSERT(!multilist_link_active(&db->db_cache_link));
	zfs_refcount_destroy(&db->db_holds);
}

/*
 * dbuf hash table routines
 */
static dbuf_hash_table_t dbuf_hash_table;

/*
 * We use Cityhash for this. It's fast, and has good hash properties without
 * requiring any large static buffers.
 */
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
	return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
}
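
/*
 * Callers map this hash onto a bucket with
 *
 *	idx = dbuf_hash(os, obj, level, blkid) & h->hash_table_mask;
 *
 * which assumes the table size is a power of two; dbuf_init() grows
 * hsize by doubling, so the mask is always of the form 2^n - 1.
 */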

#define	DTRACE_SET_STATE(db, why) \
	DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db,	\
	    const char *, why)

#define	DBUF_EQUAL(dbuf, os, obj, level, blkid)		\
	((dbuf)->db.db_object == (obj) &&		\
	(dbuf)->db_objset == (os) &&			\
	(dbuf)->db_level == (level) &&			\
	(dbuf)->db_blkid == (blkid))

dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv;
	uint64_t idx;
	dmu_buf_impl_t *db;

	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
		if (DBUF_EQUAL(db, os, obj, level, blkid)) {
			mutex_enter(&db->db_mtx);
			if (db->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (db);
			}
			mutex_exit(&db->db_mtx);
		}
	}
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	return (NULL);
}

static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
	dnode_t *dn;
	dmu_buf_impl_t *db = NULL;

	if (dnode_hold(os, object, FTAG, &dn) == 0) {
		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		if (dn->dn_bonus != NULL) {
			db = dn->dn_bonus;
			mutex_enter(&db->db_mtx);
		}
		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
	}
	return (db);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	objset_t *os = db->db_objset;
	uint64_t obj = db->db.db_object;
	int level = db->db_level;
	uint64_t blkid, hv, idx;
	dmu_buf_impl_t *dbf;
	uint32_t i;

	blkid = db->db_blkid;
	hv = dbuf_hash(os, obj, level, blkid);
	idx = hv & h->hash_table_mask;

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
	    dbf = dbf->db_hash_next, i++) {
		if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
			mutex_enter(&dbf->db_mtx);
			if (dbf->db_state != DB_EVICTING) {
				mutex_exit(DBUF_HASH_MUTEX(h, idx));
				return (dbf);
			}
			mutex_exit(&dbf->db_mtx);
		}
	}

	if (i > 0) {
		DBUF_STAT_BUMP(hash_collisions);
		if (i == 1)
			DBUF_STAT_BUMP(hash_chains);

		DBUF_STAT_MAX(hash_chain_max, i);
	}

	mutex_enter(&db->db_mtx);
	db->db_hash_next = h->hash_table[idx];
	h->hash_table[idx] = db;
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
	DBUF_STAT_MAX(hash_elements_max, he);

	return (NULL);
}

/*
 * This returns whether this dbuf should be stored in the metadata cache, which
 * is based on whether it's from one of the dnode types that store data related
 * to traversing dataset hierarchies.
 */
static boolean_t
dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
{
	DB_DNODE_ENTER(db);
	dmu_object_type_t type = DB_DNODE(db)->dn_type;
	DB_DNODE_EXIT(db);

	/* Check if this dbuf is one of the types we care about */
	if (DMU_OT_IS_METADATA_CACHED(type)) {
		/* If we hit this, then we set something up wrong in dmu_ot */
		ASSERT(DMU_OT_IS_METADATA(type));

		/*
		 * Sanity check for small-memory systems: don't allocate too
		 * much memory for this purpose.
		 */
		if (zfs_refcount_count(
		    &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
		    dbuf_metadata_cache_target_bytes()) {
			DBUF_STAT_BUMP(metadata_cache_overflow);
			return (B_FALSE);
		}

		return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Remove an entry from the hash table. It must be in the EVICTING state.
 */
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	uint64_t hv, idx;
	dmu_buf_impl_t *dbf, **dbp;

	hv = dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid);
	idx = hv & h->hash_table_mask;

	/*
	 * We mustn't hold db_mtx to maintain lock ordering:
	 * DBUF_HASH_MUTEX > db_mtx.
	 */
	ASSERT(zfs_refcount_is_zero(&db->db_holds));
	ASSERT(db->db_state == DB_EVICTING);
	ASSERT(!MUTEX_HELD(&db->db_mtx));

	mutex_enter(DBUF_HASH_MUTEX(h, idx));
	dbp = &h->hash_table[idx];
	while ((dbf = *dbp) != db) {
		dbp = &dbf->db_hash_next;
		ASSERT(dbf != NULL);
	}
	*dbp = db->db_hash_next;
	db->db_hash_next = NULL;
	if (h->hash_table[idx] &&
	    h->hash_table[idx]->db_hash_next == NULL)
		DBUF_STAT_BUMPDOWN(hash_chains);
	mutex_exit(DBUF_HASH_MUTEX(h, idx));
	atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
}

typedef enum {
	DBVU_EVICTING,
	DBVU_NOT_EVICTING
} dbvu_verify_type_t;

static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
	int64_t holds;

	if (db->db_user == NULL)
		return;

	/* Only data blocks support the attachment of user data. */
	ASSERT(db->db_level == 0);

	/* Clients must resolve a dbuf before attaching user data. */
	ASSERT(db->db.db_data != NULL);
	ASSERT3U(db->db_state, ==, DB_CACHED);

	holds = zfs_refcount_count(&db->db_holds);
	if (verify_type == DBVU_EVICTING) {
		/*
		 * Immediate eviction occurs when holds == dirtycnt.
		 * For normal eviction buffers, holds is zero on
		 * eviction, except when dbuf_fix_old_data() calls
		 * dbuf_clear_data(). However, the hold count can grow
		 * during eviction even though db_mtx is held (see
		 * dmu_bonus_hold() for an example), so we can only
		 * test the generic invariant that holds >= dirtycnt.
		 */
		ASSERT3U(holds, >=, db->db_dirtycnt);
	} else {
		if (db->db_user_immediate_evict == TRUE)
			ASSERT3U(holds, >=, db->db_dirtycnt);
		else
			ASSERT3U(holds, >, 0);
	}
#endif
}

static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
	dmu_buf_user_t *dbu = db->db_user;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (dbu == NULL)
		return;

	dbuf_verify_user(db, DBVU_EVICTING);
	db->db_user = NULL;

#ifdef ZFS_DEBUG
	if (dbu->dbu_clear_on_evict_dbufp != NULL)
		*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif

	/*
	 * There are two eviction callbacks - one that we call synchronously
	 * and one that we invoke via a taskq. The async one is useful for
	 * avoiding lock order reversals and limiting stack depth.
	 *
	 * Note that if we have a sync callback but no async callback,
	 * it's likely that the sync callback will free the structure
	 * containing the dbu. In that case we need to take care to not
	 * dereference dbu after calling the sync evict func.
	 */
	boolean_t has_async = (dbu->dbu_evict_func_async != NULL);

	if (dbu->dbu_evict_func_sync != NULL)
		dbu->dbu_evict_func_sync(dbu);

	if (has_async) {
		taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
		    dbu, 0, &dbu->dbu_tqent);
	}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
	/*
	 * Consider indirect blocks and spill blocks to be metadata.
	 */
	if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
		return (B_TRUE);
	} else {
		boolean_t is_metadata;

		DB_DNODE_ENTER(db);
		is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
		DB_DNODE_EXIT(db);

		return (is_metadata);
	}
}


/*
 * This function *must* return indices evenly distributed between all
 * sublists of the multilist. This is needed due to how the dbuf eviction
 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
 * distributed between all sublists and uses this assumption when
 * deciding which sublist to evict from and how much to evict from it.
 */
static unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
	dmu_buf_impl_t *db = obj;

	/*
	 * The assumption here is that the hash value for a given
	 * dmu_buf_impl_t will remain constant throughout its lifetime
	 * (i.e. its objset, object, level and blkid fields don't change).
	 * Thus, we don't need to store the dbuf's sublist index
	 * on insertion, as this index can be recalculated on removal.
	 *
	 * Also, the low order bits of the hash value are thought to be
	 * distributed evenly. Otherwise, in the case that the multilist
	 * has a power of two number of sublists, each sublist's usage
	 * would not be evenly distributed.
	 */
	return (dbuf_hash(db->db_objset, db->db.db_object,
	    db->db_level, db->db_blkid) %
	    multilist_get_num_sublists(ml));
}

/*
 * The target size of the dbuf cache can grow with the ARC target,
 * unless limited by the tunable dbuf_cache_max_bytes.
 */
static inline unsigned long
dbuf_cache_target_bytes(void)
{
	return (MIN(dbuf_cache_max_bytes,
	    arc_target_bytes() >> dbuf_cache_shift));
}

/*
 * The target size of the dbuf metadata cache can grow with the ARC target,
 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
 */
static inline unsigned long
dbuf_metadata_cache_target_bytes(void)
{
	return (MIN(dbuf_metadata_cache_max_bytes,
	    arc_target_bytes() >> dbuf_metadata_cache_shift));
}
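
/*
 * Example with the default shifts above: with an ARC target of 4GB,
 * dbuf_cache_shift = 5 yields a dbuf cache target of 4GB >> 5 = 128MB,
 * and dbuf_metadata_cache_shift = 6 yields a metadata cache target of
 * 4GB >> 6 = 64MB, unless capped by the corresponding *_max_bytes
 * tunables.
 */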

static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target +
	    (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}

static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
	uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
	return (dbuf_cache_target -
	    (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}

static inline boolean_t
dbuf_cache_above_lowater(void)
{
	return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
	    dbuf_cache_lowater_bytes());
}

/*
 * Evict the oldest eligible dbuf from the dbuf cache.
 */
static void
dbuf_evict_one(void)
{
	int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
	multilist_sublist_t *mls = multilist_sublist_lock(
	    &dbuf_caches[DB_DBUF_CACHE].cache, idx);

	ASSERT(!MUTEX_HELD(&dbuf_evict_lock));

	dmu_buf_impl_t *db = multilist_sublist_tail(mls);
	while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
		db = multilist_sublist_prev(mls, db);
	}

	DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
	    multilist_sublist_t *, mls);

	if (db != NULL) {
		multilist_sublist_remove(mls, db);
		multilist_sublist_unlock(mls);
		(void) zfs_refcount_remove_many(
		    &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
		DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
		DBUF_STAT_BUMPDOWN(cache_count);
		DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
		    db->db.db_size);
		ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
		db->db_caching_status = DB_NO_CACHE;
		dbuf_destroy(db);
		DBUF_STAT_BUMP(cache_total_evicts);
	} else {
		multilist_sublist_unlock(mls);
	}
}

/*
 * The dbuf evict thread is responsible for aging out dbufs from the
 * cache. Once the cache has reached its maximum size, dbufs are removed
 * and destroyed. The eviction thread will continue running until the size
 * of the dbuf cache is at or below the low water mark. Once a dbuf is aged
 * out of the cache it is destroyed and becomes eligible for arc eviction.
 */
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);

	mutex_enter(&dbuf_evict_lock);
	while (!dbuf_evict_thread_exit) {
		while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			CALLB_CPR_SAFE_BEGIN(&cpr);
			(void) cv_timedwait_idle_hires(&dbuf_evict_cv,
			    &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
			CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
		}
		mutex_exit(&dbuf_evict_lock);

		/*
		 * Keep evicting as long as we're above the low water mark
		 * for the cache. We do this without holding the locks to
		 * minimize lock contention.
		 */
		while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
			dbuf_evict_one();
		}

		mutex_enter(&dbuf_evict_lock);
	}

	dbuf_evict_thread_exit = B_FALSE;
	cv_broadcast(&dbuf_evict_cv);
	CALLB_CPR_EXIT(&cpr);	/* drops dbuf_evict_lock */
	thread_exit();
}

/*
 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
 * If the dbuf cache is at its high water mark, then evict a dbuf from the
 * dbuf cache using the caller's context.
 */
static void
dbuf_evict_notify(uint64_t size)
{
	/*
	 * We check if we should evict without holding the dbuf_evict_lock,
	 * because it's OK to occasionally make the wrong decision here,
	 * and grabbing the lock results in massive lock contention.
	 */
	if (size > dbuf_cache_target_bytes()) {
		if (size > dbuf_cache_hiwater_bytes())
			dbuf_evict_one();
		cv_signal(&dbuf_evict_cv);
	}
}

static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
	dbuf_stats_t *ds = ksp->ks_data;

	if (rw == KSTAT_WRITE)
		return (SET_ERROR(EACCES));

	ds->cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_count);
	ds->cache_size_bytes.value.ui64 =
	    zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
	ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
	ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
	ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
	ds->cache_total_evicts.value.ui64 =
	    wmsum_value(&dbuf_sums.cache_total_evicts);
	for (int i = 0; i < DN_MAX_LEVELS; i++) {
		ds->cache_levels[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels[i]);
		ds->cache_levels_bytes[i].value.ui64 =
		    wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
	}
	ds->hash_hits.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_hits);
	ds->hash_misses.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_misses);
	ds->hash_collisions.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_collisions);
	ds->hash_chains.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_chains);
	ds->hash_insert_race.value.ui64 =
	    wmsum_value(&dbuf_sums.hash_insert_race);
	ds->metadata_cache_count.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_count);
	ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
	    &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
	ds->metadata_cache_overflow.value.ui64 =
	    wmsum_value(&dbuf_sums.metadata_cache_overflow);
	return (0);
}

void
dbuf_init(void)
{
	uint64_t hsize = 1ULL << 16;
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average block size of zfs_arc_average_blocksize (default 8K).
	 * By default, the table will take up
	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
	 */
	while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
		hsize <<= 1;
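
	/*
	 * Worked example (assuming the default 8K average block size and
	 * 8-byte pointers): on a system with 16GB of physical memory the
	 * loop above stops at hsize = 2^21 buckets (2^21 * 8K >= 16GB),
	 * so the table itself consumes 2^21 * 8 bytes = 16MB, i.e. the
	 * 1MB per GB noted above.
	 */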

retry:
	h->hash_table_mask = hsize - 1;
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_alloc() in the linux kernel
	 */
	h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
	h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
	if (h->hash_table == NULL) {
		/* XXX - we should really return an error instead of assert */
		ASSERT(hsize > (1ULL << 10));
		hsize >>= 1;
		goto retry;
	}

	dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
	    sizeof (dmu_buf_impl_t),
	    0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);

	dbuf_stats_init(h);

	/*
	 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
	 * configuration is not required.
	 */
	dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		multilist_create(&dbuf_caches[dcs].cache,
		    sizeof (dmu_buf_impl_t),
		    offsetof(dmu_buf_impl_t, db_cache_link),
		    dbuf_cache_multilist_index_func);
		zfs_refcount_create(&dbuf_caches[dcs].size);
	}

	dbuf_evict_thread_exit = B_FALSE;
	mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
	dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
	    NULL, 0, &p0, TS_RUN, minclsyspri);

	wmsum_init(&dbuf_sums.cache_count, 0);
	wmsum_init(&dbuf_sums.cache_total_evicts, 0);
	for (i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_init(&dbuf_sums.cache_levels[i], 0);
		wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
	}
	wmsum_init(&dbuf_sums.hash_hits, 0);
	wmsum_init(&dbuf_sums.hash_misses, 0);
	wmsum_init(&dbuf_sums.hash_collisions, 0);
	wmsum_init(&dbuf_sums.hash_chains, 0);
	wmsum_init(&dbuf_sums.hash_insert_race, 0);
	wmsum_init(&dbuf_sums.metadata_cache_count, 0);
	wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);

	dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
	    KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (dbuf_ksp != NULL) {
		for (i = 0; i < DN_MAX_LEVELS; i++) {
			snprintf(dbuf_stats.cache_levels[i].name,
			    KSTAT_STRLEN, "cache_level_%d", i);
			dbuf_stats.cache_levels[i].data_type =
			    KSTAT_DATA_UINT64;
			snprintf(dbuf_stats.cache_levels_bytes[i].name,
			    KSTAT_STRLEN, "cache_level_%d_bytes", i);
			dbuf_stats.cache_levels_bytes[i].data_type =
			    KSTAT_DATA_UINT64;
		}
		dbuf_ksp->ks_data = &dbuf_stats;
		dbuf_ksp->ks_update = dbuf_kstat_update;
		kstat_install(dbuf_ksp);
	}
}

void
dbuf_fini(void)
{
	dbuf_hash_table_t *h = &dbuf_hash_table;
	int i;

	dbuf_stats_destroy();

	for (i = 0; i < DBUF_MUTEXES; i++)
		mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL)
	/*
	 * Large allocations which do not require contiguous pages
	 * should be using vmem_free() in the linux kernel
	 */
	vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
	kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
	kmem_cache_destroy(dbuf_kmem_cache);
	taskq_destroy(dbu_evict_taskq);

	mutex_enter(&dbuf_evict_lock);
	dbuf_evict_thread_exit = B_TRUE;
	while (dbuf_evict_thread_exit) {
		cv_signal(&dbuf_evict_cv);
		cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
	}
	mutex_exit(&dbuf_evict_lock);

	mutex_destroy(&dbuf_evict_lock);
	cv_destroy(&dbuf_evict_cv);

	for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
		zfs_refcount_destroy(&dbuf_caches[dcs].size);
		multilist_destroy(&dbuf_caches[dcs].cache);
	}

	if (dbuf_ksp != NULL) {
		kstat_delete(dbuf_ksp);
		dbuf_ksp = NULL;
	}

	wmsum_fini(&dbuf_sums.cache_count);
	wmsum_fini(&dbuf_sums.cache_total_evicts);
	for (i = 0; i < DN_MAX_LEVELS; i++) {
		wmsum_fini(&dbuf_sums.cache_levels[i]);
		wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
	}
	wmsum_fini(&dbuf_sums.hash_hits);
	wmsum_fini(&dbuf_sums.hash_misses);
	wmsum_fini(&dbuf_sums.hash_collisions);
	wmsum_fini(&dbuf_sums.hash_chains);
	wmsum_fini(&dbuf_sums.hash_insert_race);
	wmsum_fini(&dbuf_sums.metadata_cache_count);
	wmsum_fini(&dbuf_sums.metadata_cache_overflow);
}

/*
 * Other stuff.
 */

#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
	dnode_t *dn;
	dbuf_dirty_record_t *dr;
	uint32_t txg_prev;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
		return;

	ASSERT(db->db_objset != NULL);
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	if (dn == NULL) {
		ASSERT(db->db_parent == NULL);
		ASSERT(db->db_blkptr == NULL);
	} else {
		ASSERT3U(db->db.db_object, ==, dn->dn_object);
		ASSERT3P(db->db_objset, ==, dn->dn_objset);
		ASSERT3U(db->db_level, <, dn->dn_nlevels);
		ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
		    db->db_blkid == DMU_SPILL_BLKID ||
		    !avl_is_empty(&dn->dn_dbufs));
	}
	if (db->db_blkid == DMU_BONUS_BLKID) {
		ASSERT(dn != NULL);
		ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
		ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
	} else if (db->db_blkid == DMU_SPILL_BLKID) {
		ASSERT(dn != NULL);
		ASSERT0(db->db.db_offset);
	} else {
		ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
	}

	if ((dr = list_head(&db->db_dirty_records)) != NULL) {
		ASSERT(dr->dr_dbuf == db);
		txg_prev = dr->dr_txg;
		for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
		    dr = list_next(&db->db_dirty_records, dr)) {
			ASSERT(dr->dr_dbuf == db);
			ASSERT(txg_prev > dr->dr_txg);
			txg_prev = dr->dr_txg;
		}
	}

	/*
	 * We can't assert that db_size matches dn_datablksz because it
	 * can be momentarily different when another thread is doing
	 * dnode_set_blksz().
	 */
	if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
		dr = db->db_data_pending;
		/*
		 * It should only be modified in syncing context, so
		 * make sure we only have one copy of the data.
		 */
		ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
	}

	/* verify db->db_blkptr */
	if (db->db_blkptr) {
		if (db->db_parent == dn->dn_dbuf) {
			/* db is pointed to by the dnode */
			/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
			if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
				ASSERT(db->db_parent == NULL);
			else
				ASSERT(db->db_parent != NULL);
			if (db->db_blkid != DMU_SPILL_BLKID)
				ASSERT3P(db->db_blkptr, ==,
				    &dn->dn_phys->dn_blkptr[db->db_blkid]);
		} else {
			/* db is pointed to by an indirect block */
			int epb __maybe_unused = db->db_parent->db.db_size >>
			    SPA_BLKPTRSHIFT;
			ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
			ASSERT3U(db->db_parent->db.db_object, ==,
			    db->db.db_object);
			/*
			 * dnode_grow_indblksz() can make this fail if we don't
			 * have the parent's rwlock. XXX indblksz no longer
			 * grows. safe to do this now?
			 */
			if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
				ASSERT3P(db->db_blkptr, ==,
				    ((blkptr_t *)db->db_parent->db.db_data +
				    db->db_blkid % epb));
			}
		}
	}
	if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
	    (db->db_buf == NULL || db->db_buf->b_data) &&
	    db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
	    db->db_state != DB_FILL && !dn->dn_free_txg) {
		/*
		 * If the blkptr isn't set but they have nonzero data,
		 * it had better be dirty, otherwise we'll lose that
		 * data when we evict this buffer.
		 *
		 * There is an exception to this rule for indirect blocks; in
		 * this case, if the indirect block is a hole, we fill in a few
		 * fields on each of the child blocks (importantly, birth time)
		 * to prevent hole birth times from being lost when you
		 * partially fill in a hole.
		 */
		if (db->db_dirtycnt == 0) {
			if (db->db_level == 0) {
				uint64_t *buf = db->db.db_data;
				int i;

				for (i = 0; i < db->db.db_size >> 3; i++) {
					ASSERT(buf[i] == 0);
				}
			} else {
				blkptr_t *bps = db->db.db_data;
				ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
				    db->db.db_size);
				/*
				 * We want to verify that all the blkptrs in the
				 * indirect block are holes, but we may have
				 * automatically set up a few fields for them.
				 * We iterate through each blkptr and verify
				 * they only have those fields set.
				 */
				for (int i = 0;
				    i < db->db.db_size / sizeof (blkptr_t);
				    i++) {
					blkptr_t *bp = &bps[i];
					ASSERT(ZIO_CHECKSUM_IS_ZERO(
					    &bp->blk_cksum));
					ASSERT(
					    DVA_IS_EMPTY(&bp->blk_dva[0]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[1]) &&
					    DVA_IS_EMPTY(&bp->blk_dva[2]));
					ASSERT0(bp->blk_fill);
					ASSERT0(bp->blk_pad[0]);
					ASSERT0(bp->blk_pad[1]);
					ASSERT(!BP_IS_EMBEDDED(bp));
					ASSERT(BP_IS_HOLE(bp));
					ASSERT0(bp->blk_phys_birth);
				}
			}
		}
	}
	DB_DNODE_EXIT(db);
}
#endif

static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	dbuf_evict_user(db);
	ASSERT3P(db->db_buf, ==, NULL);
	db->db.db_data = NULL;
	if (db->db_state != DB_NOFILL) {
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "clear data");
	}
}

static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(buf != NULL);

	db->db_buf = buf;
	ASSERT(buf->b_data != NULL);
	db->db.db_data = buf->b_data;
}

static arc_buf_t *
dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
{
	spa_t *spa = db->db_objset->os_spa;

	return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}

/*
 * Loan out an arc_buf for read. Return the loaned arc_buf.
 */
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
	arc_buf_t *abuf;

	ASSERT(db->db_blkid != DMU_BONUS_BLKID);
	mutex_enter(&db->db_mtx);
	if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
		int blksz = db->db.db_size;
		spa_t *spa = db->db_objset->os_spa;

		mutex_exit(&db->db_mtx);
		abuf = arc_loan_buf(spa, B_FALSE, blksz);
		bcopy(db->db.db_data, abuf->b_data, blksz);
	} else {
		abuf = db->db_buf;
		arc_loan_inuse_buf(abuf, db);
		db->db_buf = NULL;
		dbuf_clear_data(db);
		mutex_exit(&db->db_mtx);
	}
	return (abuf);
}

/*
 * Calculate which level n block references the data at the level 0 offset
 * provided.
 */
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
	if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
		/*
		 * The level n blkid is equal to the level 0 blkid divided by
		 * the number of level 0s in a level n block.
		 *
		 * The level 0 blkid is offset >> datablkshift =
		 * offset / 2^datablkshift.
		 *
		 * The number of level 0s in a level n is the number of block
		 * pointers in an indirect block, raised to the power of level.
		 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
		 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
		 *
		 * Thus, the level n blkid is: offset /
		 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
		 * = offset / 2^(datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 * = offset >> (datablkshift + level *
		 *   (indblkshift - SPA_BLKPTRSHIFT))
		 */

		const unsigned exp = dn->dn_datablkshift +
		    level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);

		if (exp >= 8 * sizeof (offset)) {
			/* This only happens on the highest indirection level */
			ASSERT3U(level, ==, dn->dn_nlevels - 1);
			return (0);
		}

		ASSERT3U(exp, <, 8 * sizeof (offset));

		return (offset >> exp);
	} else {
		ASSERT3U(offset, <, dn->dn_datablksz);
		return (0);
	}
}
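
/*
 * Worked example (hypothetical dnode): with 128K data blocks
 * (datablkshift = 17) and 128K indirect blocks (indblkshift = 17), an
 * indirect block holds 2^(17 - SPA_BLKPTRSHIFT) = 1024 block pointers,
 * so the level 1 blkid for an offset is offset >> (17 + 1 * (17 - 7)) =
 * offset >> 27, i.e. one level 1 block per 128MB of level 0 data.
 */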

/*
 * This function is used to lock the parent of the provided dbuf. This should be
 * used when modifying or reading db_blkptr.
 */
db_lock_type_t
dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, void *tag)
{
	enum db_lock_type ret = DLT_NONE;
	if (db->db_parent != NULL) {
		rw_enter(&db->db_parent->db_rwlock, rw);
		ret = DLT_PARENT;
	} else if (dmu_objset_ds(db->db_objset) != NULL) {
		rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
		    tag);
		ret = DLT_OBJSET;
	}
	/*
	 * We only return a DLT_NONE lock when it's the top-most indirect block
	 * of the meta-dnode of the MOS.
	 */
	return (ret);
}

/*
 * We need to pass the lock type in because it's possible that the block will
 * move from being the topmost indirect block in a dnode (and thus, have no
 * parent) to not the top-most via an indirection increase. This would cause a
 * panic if we didn't pass the lock type in.
 */
void
dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, void *tag)
{
	if (type == DLT_PARENT)
		rw_exit(&db->db_parent->db_rwlock);
	else if (type == DLT_OBJSET)
		rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
}
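
/*
 * The expected usage pattern (as in dbuf_read() below) is to capture the
 * returned lock type and hand it back when unlocking:
 *
 *	db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
 *	... read or modify db->db_blkptr ...
 *	dmu_buf_unlock_parent(db, dblt, FTAG);
 */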

static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
    arc_buf_t *buf, void *vdb)
{
	dmu_buf_impl_t *db = vdb;

	mutex_enter(&db->db_mtx);
	ASSERT3U(db->db_state, ==, DB_READ);
	/*
	 * All reads are synchronous, so we must have a hold on the dbuf
	 */
	ASSERT(zfs_refcount_count(&db->db_holds) > 0);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db.db_data == NULL);
	if (buf == NULL) {
		/* i/o error */
		ASSERT(zio == NULL || zio->io_error != 0);
		ASSERT(db->db_blkid != DMU_BONUS_BLKID);
		ASSERT3P(db->db_buf, ==, NULL);
		db->db_state = DB_UNCACHED;
		DTRACE_SET_STATE(db, "i/o error");
	} else if (db->db_level == 0 && db->db_freed_in_flight) {
		/* freed in flight */
		ASSERT(zio == NULL || zio->io_error == 0);
		arc_release(buf, db);
		bzero(buf->b_data, db->db.db_size);
		arc_buf_freeze(buf);
		db->db_freed_in_flight = FALSE;
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "freed in flight");
	} else {
		/* success */
		ASSERT(zio == NULL || zio->io_error == 0);
		dbuf_set_data(db, buf);
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "successful read");
	}
	cv_broadcast(&db->db_changed);
	dbuf_rele_and_unlock(db, NULL, B_FALSE);
}

/*
 * Shortcut for performing reads on bonus dbufs. Returns
 * an error if we fail to verify the dnode associated with
 * a decrypted block. Otherwise success.
 */
static int
dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	int bonuslen, max_bonuslen, err;

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err)
		return (err);

	bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
	max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(DB_DNODE_HELD(db));
	ASSERT3U(bonuslen, <=, db->db.db_size);
	db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
	arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
	if (bonuslen < max_bonuslen)
		bzero(db->db.db_data, max_bonuslen);
	if (bonuslen)
		bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
	db->db_state = DB_CACHED;
	DTRACE_SET_STATE(db, "bonus buffer filled");
	return (0);
}

static void
dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn)
{
	blkptr_t *bps = db->db.db_data;
	uint32_t indbs = 1ULL << dn->dn_indblkshift;
	int n_bps = indbs >> SPA_BLKPTRSHIFT;

	for (int i = 0; i < n_bps; i++) {
		blkptr_t *bp = &bps[i];

		ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==, indbs);
		BP_SET_LSIZE(bp, BP_GET_LEVEL(db->db_blkptr) == 1 ?
		    dn->dn_datablksz : BP_GET_LSIZE(db->db_blkptr));
		BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
		BP_SET_LEVEL(bp, BP_GET_LEVEL(db->db_blkptr) - 1);
		BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
	}
}

/*
 * Handle reads on dbufs that are holes, if necessary. This function
 * requires that the dbuf's mutex is held. Returns success (0) if action
 * was taken, ENOENT if no action was taken.
 */
static int
dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
{
	ASSERT(MUTEX_HELD(&db->db_mtx));

	int is_hole = db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr);
	/*
	 * For level 0 blocks only, if the above check fails:
	 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
	 * processes the delete record and clears the bp while we are waiting
	 * for the dn_mtx (resulting in a "no" from block_freed).
	 */
	if (!is_hole && db->db_level == 0) {
		is_hole = dnode_block_freed(dn, db->db_blkid) ||
		    BP_IS_HOLE(db->db_blkptr);
	}

	if (is_hole) {
		dbuf_set_data(db, dbuf_alloc_arcbuf(db));
		bzero(db->db.db_data, db->db.db_size);

		if (db->db_blkptr != NULL && db->db_level > 0 &&
		    BP_IS_HOLE(db->db_blkptr) &&
		    db->db_blkptr->blk_birth != 0) {
			dbuf_handle_indirect_hole(db, dn);
		}
		db->db_state = DB_CACHED;
		DTRACE_SET_STATE(db, "hole read satisfied");
		return (0);
	}
	return (ENOENT);
}

/*
 * This function ensures that, when doing a decrypting read of a block,
 * we make sure we have decrypted the dnode associated with it. We must do
 * this so that we ensure we are fully authenticating the checksum-of-MACs
 * tree from the root of the objset down to this block. Indirect blocks are
 * always verified against their secure checksum-of-MACs assuming that the
 * dnode containing them is correct. Now that we are doing a decrypting read,
 * we can be sure that the key is loaded and verify that assumption. This is
 * especially important considering that we always read encrypted dnode
 * blocks as raw data (without verifying their MACs) to start, and
 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
 */
static int
dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
{
	int err = 0;
	objset_t *os = db->db_objset;
	arc_buf_t *dnode_abuf;
	dnode_t *dn;
	zbookmark_phys_t zb;

	ASSERT(MUTEX_HELD(&db->db_mtx));

	if (!os->os_encrypted || os->os_raw_receive ||
	    (flags & DB_RF_NO_DECRYPT) != 0)
		return (0);

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;

	if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
		DB_DNODE_EXIT(db);
		return (0);
	}

	SET_BOOKMARK(&zb, dmu_objset_id(os),
	    DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
	err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);

	/*
	 * An error code of EACCES tells us that the key is still not
	 * available. This is ok if we are only reading authenticated
	 * (and therefore non-encrypted) blocks.
	 */
	if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
	    (db->db_blkid == DMU_BONUS_BLKID &&
	    !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
		err = 0;

	DB_DNODE_EXIT(db);

	return (err);
}

/*
 * Drops db_mtx and the parent lock specified by dblt and tag before
 * returning.
 */
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
    db_lock_type_t dblt, void *tag)
{
	dnode_t *dn;
	zbookmark_phys_t zb;
	uint32_t aflags = ARC_FLAG_NOWAIT;
	int err, zio_flags;

	err = zio_flags = 0;
	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(!zfs_refcount_is_zero(&db->db_holds));
	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db_state == DB_UNCACHED);
	ASSERT(db->db_buf == NULL);
	ASSERT(db->db_parent == NULL ||
	    RW_LOCK_HELD(&db->db_parent->db_rwlock));

	if (db->db_blkid == DMU_BONUS_BLKID) {
		err = dbuf_read_bonus(db, dn, flags);
		goto early_unlock;
	}

	err = dbuf_read_hole(db, dn, flags);
	if (err == 0)
		goto early_unlock;

	/*
	 * Any attempt to read a redacted block should result in an error. This
	 * will never happen under normal conditions, but can be useful for
	 * debugging purposes.
	 */
	if (BP_IS_REDACTED(db->db_blkptr)) {
		ASSERT(dsl_dataset_feature_is_active(
		    db->db_objset->os_dsl_dataset,
		    SPA_FEATURE_REDACTED_DATASETS));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
	    db->db.db_object, db->db_level, db->db_blkid);

	/*
	 * All bps of an encrypted os should have the encryption bit set.
	 * If this is not true it indicates tampering and we report an error.
	 */
	if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
		spa_log_error(db->db_objset->os_spa, &zb);
		zfs_panic_recover("unencrypted block in encrypted "
		    "object set %llu", dmu_objset_id(db->db_objset));
		err = SET_ERROR(EIO);
		goto early_unlock;
	}

	err = dbuf_read_verify_dnode_crypt(db, flags);
	if (err != 0)
		goto early_unlock;

	DB_DNODE_EXIT(db);

	db->db_state = DB_READ;
	DTRACE_SET_STATE(db, "read issued");
	mutex_exit(&db->db_mtx);

	if (DBUF_IS_L2CACHEABLE(db))
		aflags |= ARC_FLAG_L2CACHE;

	dbuf_add_ref(db, NULL);

	zio_flags = (flags & DB_RF_CANFAIL) ?
	    ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;

	if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
		zio_flags |= ZIO_FLAG_RAW;
	/*
	 * The zio layer will copy the provided blkptr later, but we need to
	 * do this now so that we can release the parent's rwlock. We have to
	 * do that now so that if dbuf_read_done is called synchronously (on
	 * an l1 cache hit) we don't acquire the db_mtx while holding the
	 * parent's rwlock, which would be a lock ordering violation.
	 */
	blkptr_t bp = *db->db_blkptr;
	dmu_buf_unlock_parent(db, dblt, tag);
	(void) arc_read(zio, db->db_objset->os_spa, &bp,
	    dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
	    &aflags, &zb);
	return (err);
early_unlock:
	DB_DNODE_EXIT(db);
	mutex_exit(&db->db_mtx);
	dmu_buf_unlock_parent(db, dblt, tag);
	return (err);
}

/*
 * This is our just-in-time copy function. It makes a copy of buffers that
 * have been modified in a previous transaction group before we access them in
 * the current active group.
 *
 * This function is used in three places: when we are dirtying a buffer for the
 * first time in a txg, when we are freeing a range in a dnode that includes
 * this buffer, and when we are accessing a buffer which was received compressed
 * and later referenced in a WRITE_BYREF record.
 *
 * Note that when we are called from dbuf_free_range() we do not put a hold on
 * the buffer, we just traverse the active dbuf list for the dnode.
 */
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
	dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);

	ASSERT(MUTEX_HELD(&db->db_mtx));
	ASSERT(db->db.db_data != NULL);
	ASSERT(db->db_level == 0);
	ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);

	if (dr == NULL ||
	    (dr->dt.dl.dr_data !=
	    ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
		return;

	/*
	 * If the last dirty record for this dbuf has not yet synced
	 * and it is referencing the dbuf data, either:
	 *	reset the reference to point to a new copy,
	 * or (if there are no active holders)
	 *	just null out the current db_data pointer.
	 */
1594 ASSERT3U(dr->dr_txg, >=, txg - 2);
1595 if (db->db_blkid == DMU_BONUS_BLKID) {
1596 dnode_t *dn = DB_DNODE(db);
1597 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1598 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1599 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1600 bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
1601 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1602 dnode_t *dn = DB_DNODE(db);
1603 int size = arc_buf_size(db->db_buf);
1604 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1605 spa_t *spa = db->db_objset->os_spa;
1606 enum zio_compress compress_type =
1607 arc_get_compression(db->db_buf);
1608 uint8_t complevel = arc_get_complevel(db->db_buf);
1609
1610 if (arc_is_encrypted(db->db_buf)) {
1611 boolean_t byteorder;
1612 uint8_t salt[ZIO_DATA_SALT_LEN];
1613 uint8_t iv[ZIO_DATA_IV_LEN];
1614 uint8_t mac[ZIO_DATA_MAC_LEN];
1615
1616 arc_get_raw_params(db->db_buf, &byteorder, salt,
1617 iv, mac);
1618 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1619 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1620 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1621 compress_type, complevel);
1622 } else if (compress_type != ZIO_COMPRESS_OFF) {
1623 ASSERT3U(type, ==, ARC_BUFC_DATA);
1624 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1625 size, arc_buf_lsize(db->db_buf), compress_type,
1626 complevel);
1627 } else {
1628 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1629 }
1630 bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
1631 } else {
1632 db->db_buf = NULL;
1633 dbuf_clear_data(db);
1634 }
1635 }
1636
1637 int
1638 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1639 {
1640 int err = 0;
1641 boolean_t prefetch;
1642 dnode_t *dn;
1643
1644 /*
1645 * We don't have to hold the mutex to check db_state because it
1646 * can't be freed while we have a hold on the buffer.
1647 */
1648 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1649
1650 if (db->db_state == DB_NOFILL)
1651 return (SET_ERROR(EIO));
1652
1653 DB_DNODE_ENTER(db);
1654 dn = DB_DNODE(db);
1655
1656 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1657 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
1658 DBUF_IS_CACHEABLE(db);
1659
1660 mutex_enter(&db->db_mtx);
1661 if (db->db_state == DB_CACHED) {
1662 spa_t *spa = dn->dn_objset->os_spa;
1663
1664 /*
1665 * Ensure that this block's dnode has been decrypted if
1666 * the caller has requested decrypted data.
1667 */
1668 err = dbuf_read_verify_dnode_crypt(db, flags);
1669
1670 /*
1671 * If the arc buf is compressed or encrypted and the caller
1672 * requested uncompressed data, we need to untransform it
1673 * before returning. We also call arc_untransform() on any
1674 * unauthenticated blocks, which will verify their MAC if
1675 * the key is now available.
1676 */
1677 if (err == 0 && db->db_buf != NULL &&
1678 (flags & DB_RF_NO_DECRYPT) == 0 &&
1679 (arc_is_encrypted(db->db_buf) ||
1680 arc_is_unauthenticated(db->db_buf) ||
1681 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1682 zbookmark_phys_t zb;
1683
1684 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1685 db->db.db_object, db->db_level, db->db_blkid);
1686 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1687 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1688 dbuf_set_data(db, db->db_buf);
1689 }
1690 mutex_exit(&db->db_mtx);
1691 if (err == 0 && prefetch) {
1692 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1693 B_FALSE, flags & DB_RF_HAVESTRUCT);
1694 }
1695 DB_DNODE_EXIT(db);
1696 DBUF_STAT_BUMP(hash_hits);
1697 } else if (db->db_state == DB_UNCACHED) {
1698 spa_t *spa = dn->dn_objset->os_spa;
1699 boolean_t need_wait = B_FALSE;
1700
1701 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1702
1703 if (zio == NULL &&
1704 db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
1705 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1706 need_wait = B_TRUE;
1707 }
1708 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1709 /*
1710 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1711 * for us
1712 */
1713 if (!err && prefetch) {
1714 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1715 db->db_state != DB_CACHED,
1716 flags & DB_RF_HAVESTRUCT);
1717 }
1718
1719 DB_DNODE_EXIT(db);
1720 DBUF_STAT_BUMP(hash_misses);
1721
1722 /*
1723 * If we created a zio_root we must execute it to avoid
1724 * leaking it, even if it isn't attached to any work due
1725 * to an error in dbuf_read_impl().
1726 */
1727 if (need_wait) {
1728 if (err == 0)
1729 err = zio_wait(zio);
1730 else
1731 VERIFY0(zio_wait(zio));
1732 }
1733 } else {
1734 /*
1735 * Another reader came in while the dbuf was in flight
1736 * between UNCACHED and CACHED. Either a writer will finish
1737 * writing the buffer (sending the dbuf to CACHED) or the
1738 * first reader's request will reach the read_done callback
1739 * and send the dbuf to CACHED. Otherwise, a failure
1740 * occurred and the dbuf went to UNCACHED.
1741 */
1742 mutex_exit(&db->db_mtx);
1743 if (prefetch) {
1744 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1745 B_TRUE, flags & DB_RF_HAVESTRUCT);
1746 }
1747 DB_DNODE_EXIT(db);
1748 DBUF_STAT_BUMP(hash_misses);
1749
1750 /* Skip the wait per the caller's request. */
1751 if ((flags & DB_RF_NEVERWAIT) == 0) {
1752 mutex_enter(&db->db_mtx);
1753 while (db->db_state == DB_READ ||
1754 db->db_state == DB_FILL) {
1755 ASSERT(db->db_state == DB_READ ||
1756 (flags & DB_RF_HAVESTRUCT) == 0);
1757 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1758 db, zio_t *, zio);
1759 cv_wait(&db->db_changed, &db->db_mtx);
1760 }
1761 if (db->db_state == DB_UNCACHED)
1762 err = SET_ERROR(EIO);
1763 mutex_exit(&db->db_mtx);
1764 }
1765 }
1766
1767 return (err);
1768 }
1769
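/*
 * Illustrative sketch (not part of the original file): a typical consumer
 * holds the dbuf and then reads it.  With a NULL zio, dbuf_read() creates
 * its own zio_root and waits on it, per the need_wait logic above:
 *
 *	dmu_buf_impl_t *db;
 *	if (dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db) == 0) {
 *		int err = dbuf_read(db, NULL, DB_RF_CANFAIL);
 *		if (err == 0)
 *			... db->db.db_data is now valid (DB_CACHED) ...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * Passing a non-NULL zio instead attaches the read to the caller's zio
 * tree, and the caller must then zio_wait() on that tree itself.
 */
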
1770 static void
1771 dbuf_noread(dmu_buf_impl_t *db)
1772 {
1773 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1774 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1775 mutex_enter(&db->db_mtx);
1776 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1777 cv_wait(&db->db_changed, &db->db_mtx);
1778 if (db->db_state == DB_UNCACHED) {
1779 ASSERT(db->db_buf == NULL);
1780 ASSERT(db->db.db_data == NULL);
1781 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1782 db->db_state = DB_FILL;
1783 DTRACE_SET_STATE(db, "assigning filled buffer");
1784 } else if (db->db_state == DB_NOFILL) {
1785 dbuf_clear_data(db);
1786 } else {
1787 ASSERT3U(db->db_state, ==, DB_CACHED);
1788 }
1789 mutex_exit(&db->db_mtx);
1790 }
1791
1792 void
1793 dbuf_unoverride(dbuf_dirty_record_t *dr)
1794 {
1795 dmu_buf_impl_t *db = dr->dr_dbuf;
1796 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1797 uint64_t txg = dr->dr_txg;
1798
1799 ASSERT(MUTEX_HELD(&db->db_mtx));
1800 /*
1801 * This assert is valid because dmu_sync() expects to be called by
1802 * a zilog's get_data while holding a range lock. This call only
1803 * comes from dbuf_dirty() callers who must also hold a range lock.
1804 */
1805 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1806 ASSERT(db->db_level == 0);
1807
1808 if (db->db_blkid == DMU_BONUS_BLKID ||
1809 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1810 return;
1811
1812 ASSERT(db->db_data_pending != dr);
1813
1814 /* free this block */
1815 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1816 zio_free(db->db_objset->os_spa, txg, bp);
1817
1818 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1819 dr->dt.dl.dr_nopwrite = B_FALSE;
1820 dr->dt.dl.dr_has_raw_params = B_FALSE;
1821
1822 /*
1823 * Release the already-written buffer, so we leave it in
1824 * a consistent dirty state. Note that all callers are
1825 * modifying the buffer, so they will immediately do
1826 * another (redundant) arc_release(). Therefore, leave
1827 * the buf thawed to save the effort of freezing &
1828 * immediately re-thawing it.
1829 */
1830 arc_release(dr->dt.dl.dr_data, db);
1831 }
1832
1833 /*
1834 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1835 * data blocks in the free range, so that any future readers will find
1836 * empty blocks.
1837 */
1838 void
1839 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1840 dmu_tx_t *tx)
1841 {
1842 dmu_buf_impl_t *db_search;
1843 dmu_buf_impl_t *db, *db_next;
1844 uint64_t txg = tx->tx_txg;
1845 avl_index_t where;
1846 dbuf_dirty_record_t *dr;
1847
1848 if (end_blkid > dn->dn_maxblkid &&
1849 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1850 end_blkid = dn->dn_maxblkid;
1851 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1852 (u_longlong_t)end_blkid);
1853
1854 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1855 db_search->db_level = 0;
1856 db_search->db_blkid = start_blkid;
1857 db_search->db_state = DB_SEARCH;
1858
1859 mutex_enter(&dn->dn_dbufs_mtx);
1860 db = avl_find(&dn->dn_dbufs, db_search, &where);
1861 ASSERT3P(db, ==, NULL);
1862
1863 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1864
1865 for (; db != NULL; db = db_next) {
1866 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1867 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1868
1869 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1870 break;
1871 }
1872 ASSERT3U(db->db_blkid, >=, start_blkid);
1873
1874 /* found a level 0 buffer in the range */
1875 mutex_enter(&db->db_mtx);
1876 if (dbuf_undirty(db, tx)) {
1877 /* mutex has been dropped and dbuf destroyed */
1878 continue;
1879 }
1880
1881 if (db->db_state == DB_UNCACHED ||
1882 db->db_state == DB_NOFILL ||
1883 db->db_state == DB_EVICTING) {
1884 ASSERT(db->db.db_data == NULL);
1885 mutex_exit(&db->db_mtx);
1886 continue;
1887 }
1888 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
1889 /* will be handled in dbuf_read_done or dbuf_rele */
1890 db->db_freed_in_flight = TRUE;
1891 mutex_exit(&db->db_mtx);
1892 continue;
1893 }
1894 if (zfs_refcount_count(&db->db_holds) == 0) {
1895 ASSERT(db->db_buf);
1896 dbuf_destroy(db);
1897 continue;
1898 }
1899 /* The dbuf is referenced */
1900
1901 dr = list_head(&db->db_dirty_records);
1902 if (dr != NULL) {
1903 if (dr->dr_txg == txg) {
1904 /*
1905 * This buffer is "in-use", re-adjust the file
1906 * size to reflect that this buffer may
1907 * contain new data when we sync.
1908 */
1909 if (db->db_blkid != DMU_SPILL_BLKID &&
1910 db->db_blkid > dn->dn_maxblkid)
1911 dn->dn_maxblkid = db->db_blkid;
1912 dbuf_unoverride(dr);
1913 } else {
1914 /*
1915 * This dbuf is not dirty in the open context.
1916 * Either uncache it (if it's not referenced in
1917 * the open context) or reset its contents to
1918 * empty.
1919 */
1920 dbuf_fix_old_data(db, txg);
1921 }
1922 }
1923 /* clear the contents if it's cached */
1924 if (db->db_state == DB_CACHED) {
1925 ASSERT(db->db.db_data != NULL);
1926 arc_release(db->db_buf, db);
1927 rw_enter(&db->db_rwlock, RW_WRITER);
1928 bzero(db->db.db_data, db->db.db_size);
1929 rw_exit(&db->db_rwlock);
1930 arc_buf_freeze(db->db_buf);
1931 }
1932
1933 mutex_exit(&db->db_mtx);
1934 }
1935
1936 kmem_free(db_search, sizeof (dmu_buf_impl_t));
1937 mutex_exit(&dn->dn_dbufs_mtx);
1938 }
1939
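/*
 * Illustrative sketch (not part of the original file): this is driven by
 * the dnode free path once the free has a transaction assigned, e.g. a
 * hypothetical caller:
 *
 *	dbuf_free_range(dn, first_blkid, last_blkid, tx);
 *
 * Each level-0 dbuf in the range is then destroyed (unreferenced),
 * flagged freed-in-flight (mid READ/FILL), or zeroed in place (cached
 * and still referenced), per the cases above.
 */
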
1940 void
1941 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
1942 {
1943 arc_buf_t *buf, *old_buf;
1944 dbuf_dirty_record_t *dr;
1945 int osize = db->db.db_size;
1946 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1947 dnode_t *dn;
1948
1949 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1950
1951 DB_DNODE_ENTER(db);
1952 dn = DB_DNODE(db);
1953
1954 /*
1955 * XXX we should be doing a dbuf_read, checking the return
1956 * value and returning that up to our callers
1957 */
1958 dmu_buf_will_dirty(&db->db, tx);
1959
1960 /* create the data buffer for the new block */
1961 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
1962
1963 /* copy old block data to the new block */
1964 old_buf = db->db_buf;
1965 bcopy(old_buf->b_data, buf->b_data, MIN(osize, size));
1966 /* zero the remainder */
1967 if (size > osize)
1968 bzero((uint8_t *)buf->b_data + osize, size - osize);
1969
1970 mutex_enter(&db->db_mtx);
1971 dbuf_set_data(db, buf);
1972 arc_buf_destroy(old_buf, db);
1973 db->db.db_size = size;
1974
1975 dr = list_head(&db->db_dirty_records);
1976 /* dirty record added by dmu_buf_will_dirty() */
1977 VERIFY(dr != NULL);
1978 if (db->db_level == 0)
1979 dr->dt.dl.dr_data = buf;
1980 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
1981 ASSERT3U(dr->dr_accounted, ==, osize);
1982 dr->dr_accounted = size;
1983 mutex_exit(&db->db_mtx);
1984
1985 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
1986 DB_DNODE_EXIT(db);
1987 }
1988
1989 void
1990 dbuf_release_bp(dmu_buf_impl_t *db)
1991 {
1992 objset_t *os __maybe_unused = db->db_objset;
1993
1994 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
1995 ASSERT(arc_released(os->os_phys_buf) ||
1996 list_link_active(&os->os_dsl_dataset->ds_synced_link));
1997 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
1998
1999 (void) arc_release(db->db_buf, db);
2000 }
2001
2002 /*
2003 * We already have a dirty record for this TXG, and we are being
2004 * dirtied again.
2005 */
2006 static void
2007 dbuf_redirty(dbuf_dirty_record_t *dr)
2008 {
2009 dmu_buf_impl_t *db = dr->dr_dbuf;
2010
2011 ASSERT(MUTEX_HELD(&db->db_mtx));
2012
2013 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2014 /*
2015 * If this buffer has already been written out,
2016 * we now need to reset its state.
2017 */
2018 dbuf_unoverride(dr);
2019 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2020 db->db_state != DB_NOFILL) {
2021 /* Already released on initial dirty, so just thaw. */
2022 ASSERT(arc_released(db->db_buf));
2023 arc_buf_thaw(db->db_buf);
2024 }
2025 }
2026 }
2027
2028 dbuf_dirty_record_t *
2029 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2030 {
2031 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2032 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2033 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2034 ASSERT(dn->dn_maxblkid >= blkid);
2035
2036 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2037 list_link_init(&dr->dr_dirty_node);
2038 list_link_init(&dr->dr_dbuf_node);
2039 dr->dr_dnode = dn;
2040 dr->dr_txg = tx->tx_txg;
2041 dr->dt.dll.dr_blkid = blkid;
2042 dr->dr_accounted = dn->dn_datablksz;
2043
2044 /*
2045 * There should not be any dbuf for the block that we're dirtying.
2046 * Otherwise the buffer contents could be inconsistent between the
2047 * dbuf and the lightweight dirty record.
2048 */
2049 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid));
2050
2051 mutex_enter(&dn->dn_mtx);
2052 int txgoff = tx->tx_txg & TXG_MASK;
2053 if (dn->dn_free_ranges[txgoff] != NULL) {
2054 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2055 }
2056
2057 if (dn->dn_nlevels == 1) {
2058 ASSERT3U(blkid, <, dn->dn_nblkptr);
2059 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2060 mutex_exit(&dn->dn_mtx);
2061 rw_exit(&dn->dn_struct_rwlock);
2062 dnode_setdirty(dn, tx);
2063 } else {
2064 mutex_exit(&dn->dn_mtx);
2065
2066 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2067 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2068 1, blkid >> epbs, FTAG);
2069 rw_exit(&dn->dn_struct_rwlock);
2070 if (parent_db == NULL) {
2071 kmem_free(dr, sizeof (*dr));
2072 return (NULL);
2073 }
2074 int err = dbuf_read(parent_db, NULL,
2075 (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2076 if (err != 0) {
2077 dbuf_rele(parent_db, FTAG);
2078 kmem_free(dr, sizeof (*dr));
2079 return (NULL);
2080 }
2081
2082 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2083 dbuf_rele(parent_db, FTAG);
2084 mutex_enter(&parent_dr->dt.di.dr_mtx);
2085 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2086 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2087 mutex_exit(&parent_dr->dt.di.dr_mtx);
2088 dr->dr_parent = parent_dr;
2089 }
2090
2091 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2092
2093 return (dr);
2094 }
2095
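/*
 * Illustrative sketch (not part of the original file; the dt.dll field
 * names beyond dr_blkid are assumptions about the lightweight record):
 * the caller attaches the payload directly to the returned record
 * instead of going through a dbuf:
 *
 *	dbuf_dirty_record_t *dr = dbuf_dirty_lightweight(dn, blkid, tx);
 *	if (dr == NULL)
 *		... fall back to the regular dbuf path ...
 *	dr->dt.dll.dr_abd = abd;	(data to write, as an abd_t)
 *	dr->dt.dll.dr_props = zp;	(zio_prop_t for the write)
 */
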
2096 dbuf_dirty_record_t *
2097 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2098 {
2099 dnode_t *dn;
2100 objset_t *os;
2101 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2102 int txgoff = tx->tx_txg & TXG_MASK;
2103 boolean_t drop_struct_rwlock = B_FALSE;
2104
2105 ASSERT(tx->tx_txg != 0);
2106 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2107 DMU_TX_DIRTY_BUF(tx, db);
2108
2109 DB_DNODE_ENTER(db);
2110 dn = DB_DNODE(db);
2111 /*
2112 * Shouldn't dirty a regular buffer in syncing context. Private
2113 * objects may be dirtied in syncing context, but only if they
2114 * were already pre-dirtied in open context.
2115 */
2116 #ifdef ZFS_DEBUG
2117 if (dn->dn_objset->os_dsl_dataset != NULL) {
2118 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2119 RW_READER, FTAG);
2120 }
2121 ASSERT(!dmu_tx_is_syncing(tx) ||
2122 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2123 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2124 dn->dn_objset->os_dsl_dataset == NULL);
2125 if (dn->dn_objset->os_dsl_dataset != NULL)
2126 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2127 #endif
2128 /*
2129 * We make this assert for private objects as well, but after we
2130 * check if we're already dirty. They are allowed to re-dirty
2131 * in syncing context.
2132 */
2133 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2134 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2135 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2136
2137 mutex_enter(&db->db_mtx);
2138 /*
2139 * XXX make this true for indirects too? The problem is that
2140 * transactions created with dmu_tx_create_assigned() from
2141 * syncing context don't bother holding ahead.
2142 */
2143 ASSERT(db->db_level != 0 ||
2144 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2145 db->db_state == DB_NOFILL);
2146
2147 mutex_enter(&dn->dn_mtx);
2148 dnode_set_dirtyctx(dn, tx, db);
2149 if (tx->tx_txg > dn->dn_dirty_txg)
2150 dn->dn_dirty_txg = tx->tx_txg;
2151 mutex_exit(&dn->dn_mtx);
2152
2153 if (db->db_blkid == DMU_SPILL_BLKID)
2154 dn->dn_have_spill = B_TRUE;
2155
2156 /*
2157 * If this buffer is already dirty, we're done.
2158 */
2159 dr_head = list_head(&db->db_dirty_records);
2160 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2161 db->db.db_object == DMU_META_DNODE_OBJECT);
2162 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2163 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2164 DB_DNODE_EXIT(db);
2165
2166 dbuf_redirty(dr_next);
2167 mutex_exit(&db->db_mtx);
2168 return (dr_next);
2169 }
2170
2171 /*
2172 * Only valid if not already dirty.
2173 */
2174 ASSERT(dn->dn_object == 0 ||
2175 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2176 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2177
2178 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2179
2180 /*
2181 * We should only be dirtying in syncing context if it's the
2182 * mos or we're initializing the os or it's a special object.
2183 * However, we are allowed to dirty in syncing context provided
2184 * we already dirtied it in open context. Hence we must make
2185 * this assertion only if we're not already dirty.
2186 */
2187 os = dn->dn_objset;
2188 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2189 #ifdef ZFS_DEBUG
2190 if (dn->dn_objset->os_dsl_dataset != NULL)
2191 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2192 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2193 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2194 if (dn->dn_objset->os_dsl_dataset != NULL)
2195 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2196 #endif
2197 ASSERT(db->db.db_size != 0);
2198
2199 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2200
2201 if (db->db_blkid != DMU_BONUS_BLKID) {
2202 dmu_objset_willuse_space(os, db->db.db_size, tx);
2203 }
2204
2205 /*
2206 * If this buffer is dirty in an old transaction group we need
2207 * to make a copy of it so that the changes we make in this
2208 * transaction group won't leak out when we sync the older txg.
2209 */
2210 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2211 list_link_init(&dr->dr_dirty_node);
2212 list_link_init(&dr->dr_dbuf_node);
2213 dr->dr_dnode = dn;
2214 if (db->db_level == 0) {
2215 void *data_old = db->db_buf;
2216
2217 if (db->db_state != DB_NOFILL) {
2218 if (db->db_blkid == DMU_BONUS_BLKID) {
2219 dbuf_fix_old_data(db, tx->tx_txg);
2220 data_old = db->db.db_data;
2221 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2222 /*
2223 * Release the data buffer from the cache so
2224 * that we can modify it without impacting
2225 * possible other users of this cached data
2226 * block. Note that indirect blocks and
2227 * private objects are not released until the
2228 * syncing state (since they are only modified
2229 * then).
2230 */
2231 arc_release(db->db_buf, db);
2232 dbuf_fix_old_data(db, tx->tx_txg);
2233 data_old = db->db_buf;
2234 }
2235 ASSERT(data_old != NULL);
2236 }
2237 dr->dt.dl.dr_data = data_old;
2238 } else {
2239 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2240 list_create(&dr->dt.di.dr_children,
2241 sizeof (dbuf_dirty_record_t),
2242 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2243 }
2244 if (db->db_blkid != DMU_BONUS_BLKID)
2245 dr->dr_accounted = db->db.db_size;
2246 dr->dr_dbuf = db;
2247 dr->dr_txg = tx->tx_txg;
2248 list_insert_before(&db->db_dirty_records, dr_next, dr);
2249
2250 /*
2251 * We could have been freed_in_flight between the dbuf_noread
2252 * and dbuf_dirty. We win, as though the dbuf_noread() had
2253 * happened after the free.
2254 */
2255 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2256 db->db_blkid != DMU_SPILL_BLKID) {
2257 mutex_enter(&dn->dn_mtx);
2258 if (dn->dn_free_ranges[txgoff] != NULL) {
2259 range_tree_clear(dn->dn_free_ranges[txgoff],
2260 db->db_blkid, 1);
2261 }
2262 mutex_exit(&dn->dn_mtx);
2263 db->db_freed_in_flight = FALSE;
2264 }
2265
2266 /*
2267 * This buffer is now part of this txg
2268 */
2269 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2270 db->db_dirtycnt += 1;
2271 ASSERT3U(db->db_dirtycnt, <=, 3);
2272
2273 mutex_exit(&db->db_mtx);
2274
2275 if (db->db_blkid == DMU_BONUS_BLKID ||
2276 db->db_blkid == DMU_SPILL_BLKID) {
2277 mutex_enter(&dn->dn_mtx);
2278 ASSERT(!list_link_active(&dr->dr_dirty_node));
2279 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2280 mutex_exit(&dn->dn_mtx);
2281 dnode_setdirty(dn, tx);
2282 DB_DNODE_EXIT(db);
2283 return (dr);
2284 }
2285
2286 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2287 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2288 drop_struct_rwlock = B_TRUE;
2289 }
2290
2291 /*
2292 * If we are overwriting a dedup BP, then unless it is snapshotted,
2293 * when we get to syncing context we will need to decrement its
2294 * refcount in the DDT. Prefetch the relevant DDT block so that
2295 * syncing context won't have to wait for the i/o.
2296 */
2297 if (db->db_blkptr != NULL) {
2298 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2299 ddt_prefetch(os->os_spa, db->db_blkptr);
2300 dmu_buf_unlock_parent(db, dblt, FTAG);
2301 }
2302
2303 /*
2304 * We need to hold the dn_struct_rwlock to make this assertion,
2305 * because it protects dn_phys / dn_next_nlevels from changing.
2306 */
2307 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2308 dn->dn_phys->dn_nlevels > db->db_level ||
2309 dn->dn_next_nlevels[txgoff] > db->db_level ||
2310 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2311 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2312
2313
2314 if (db->db_level == 0) {
2315 ASSERT(!db->db_objset->os_raw_receive ||
2316 dn->dn_maxblkid >= db->db_blkid);
2317 dnode_new_blkid(dn, db->db_blkid, tx,
2318 drop_struct_rwlock, B_FALSE);
2319 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2320 }
2321
2322 if (db->db_level+1 < dn->dn_nlevels) {
2323 dmu_buf_impl_t *parent = db->db_parent;
2324 dbuf_dirty_record_t *di;
2325 int parent_held = FALSE;
2326
2327 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2328 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2329 parent = dbuf_hold_level(dn, db->db_level + 1,
2330 db->db_blkid >> epbs, FTAG);
2331 ASSERT(parent != NULL);
2332 parent_held = TRUE;
2333 }
2334 if (drop_struct_rwlock)
2335 rw_exit(&dn->dn_struct_rwlock);
2336 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2337 di = dbuf_dirty(parent, tx);
2338 if (parent_held)
2339 dbuf_rele(parent, FTAG);
2340
2341 mutex_enter(&db->db_mtx);
2342 /*
2343 * Since we've dropped the mutex, it's possible that
2344 * dbuf_undirty() might have changed this out from under us.
2345 */
2346 if (list_head(&db->db_dirty_records) == dr ||
2347 dn->dn_object == DMU_META_DNODE_OBJECT) {
2348 mutex_enter(&di->dt.di.dr_mtx);
2349 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2350 ASSERT(!list_link_active(&dr->dr_dirty_node));
2351 list_insert_tail(&di->dt.di.dr_children, dr);
2352 mutex_exit(&di->dt.di.dr_mtx);
2353 dr->dr_parent = di;
2354 }
2355 mutex_exit(&db->db_mtx);
2356 } else {
2357 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2358 ASSERT(db->db_blkid < dn->dn_nblkptr);
2359 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2360 mutex_enter(&dn->dn_mtx);
2361 ASSERT(!list_link_active(&dr->dr_dirty_node));
2362 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2363 mutex_exit(&dn->dn_mtx);
2364 if (drop_struct_rwlock)
2365 rw_exit(&dn->dn_struct_rwlock);
2366 }
2367
2368 dnode_setdirty(dn, tx);
2369 DB_DNODE_EXIT(db);
2370 return (dr);
2371 }
2372
2373 static void
2374 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2375 {
2376 dmu_buf_impl_t *db = dr->dr_dbuf;
2377
2378 if (dr->dt.dl.dr_data != db->db.db_data) {
2379 struct dnode *dn = dr->dr_dnode;
2380 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2381
2382 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2383 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2384 }
2385 db->db_data_pending = NULL;
2386 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2387 list_remove(&db->db_dirty_records, dr);
2388 if (dr->dr_dbuf->db_level != 0) {
2389 mutex_destroy(&dr->dt.di.dr_mtx);
2390 list_destroy(&dr->dt.di.dr_children);
2391 }
2392 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2393 ASSERT3U(db->db_dirtycnt, >, 0);
2394 db->db_dirtycnt -= 1;
2395 }
2396
2397 /*
2398 * Undirty a buffer in the transaction group referenced by the given
2399 * transaction. Return whether this evicted the dbuf.
2400 */
2401 static boolean_t
2402 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2403 {
2404 uint64_t txg = tx->tx_txg;
2405
2406 ASSERT(txg != 0);
2407
2408 /*
2409 * Due to our use of dn_nlevels below, this can only be called
2410 * in open context, unless we are operating on the MOS.
2411 * From syncing context, dn_nlevels may be different from the
2412 * dn_nlevels used when dbuf was dirtied.
2413 */
2414 ASSERT(db->db_objset ==
2415 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2416 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2417 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2418 ASSERT0(db->db_level);
2419 ASSERT(MUTEX_HELD(&db->db_mtx));
2420
2421 /*
2422 * If this buffer is not dirty, we're done.
2423 */
2424 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2425 if (dr == NULL)
2426 return (B_FALSE);
2427 ASSERT(dr->dr_dbuf == db);
2428
2429 dnode_t *dn = dr->dr_dnode;
2430
2431 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2432
2433 ASSERT(db->db.db_size != 0);
2434
2435 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2436 dr->dr_accounted, txg);
2437
2438 list_remove(&db->db_dirty_records, dr);
2439
2440 /*
2441 * Note that there are three places in dbuf_dirty()
2442 * where this dirty record may be put on a list.
2443 * Make sure to do a list_remove corresponding to
2444 * every one of those list_insert calls.
2445 */
2446 if (dr->dr_parent) {
2447 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2448 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2449 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2450 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2451 db->db_level + 1 == dn->dn_nlevels) {
2452 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2453 mutex_enter(&dn->dn_mtx);
2454 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2455 mutex_exit(&dn->dn_mtx);
2456 }
2457
2458 if (db->db_state != DB_NOFILL) {
2459 dbuf_unoverride(dr);
2460
2461 ASSERT(db->db_buf != NULL);
2462 ASSERT(dr->dt.dl.dr_data != NULL);
2463 if (dr->dt.dl.dr_data != db->db_buf)
2464 arc_buf_destroy(dr->dt.dl.dr_data, db);
2465 }
2466
2467 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2468
2469 ASSERT(db->db_dirtycnt > 0);
2470 db->db_dirtycnt -= 1;
2471
2472 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2473 ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
2474 dbuf_destroy(db);
2475 return (B_TRUE);
2476 }
2477
2478 return (B_FALSE);
2479 }
2480
2481 static void
2482 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2483 {
2484 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2485
2486 ASSERT(tx->tx_txg != 0);
2487 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2488
2489 /*
2490 * Quick check for dirtiness. For already dirty blocks, this
2491 * reduces runtime of this function by >90%, and overall performance
2492 * by 50% for some workloads (e.g. file deletion with indirect blocks
2493 * cached).
2494 */
2495 mutex_enter(&db->db_mtx);
2496
2497 if (db->db_state == DB_CACHED) {
2498 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2499 /*
2500 * It's possible that it is already dirty but not cached,
2501 * because there are some calls to dbuf_dirty() that don't
2502 * go through dmu_buf_will_dirty().
2503 */
2504 if (dr != NULL) {
2505 /* This dbuf is already dirty and cached. */
2506 dbuf_redirty(dr);
2507 mutex_exit(&db->db_mtx);
2508 return;
2509 }
2510 }
2511 mutex_exit(&db->db_mtx);
2512
2513 DB_DNODE_ENTER(db);
2514 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2515 flags |= DB_RF_HAVESTRUCT;
2516 DB_DNODE_EXIT(db);
2517 (void) dbuf_read(db, NULL, flags);
2518 (void) dbuf_dirty(db, tx);
2519 }
2520
2521 void
2522 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2523 {
2524 dmu_buf_will_dirty_impl(db_fake,
2525 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2526 }
2527
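/*
 * Illustrative sketch (not part of the original file): the canonical DMU
 * write pattern that reaches dbuf_dirty() through this entry point,
 * assuming a transaction that has already been assigned:
 *
 *	dmu_buf_t *db;
 *	VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &db, 0));
 *	dmu_buf_will_dirty(db, tx);
 *	... modify db->db_data ...
 *	dmu_buf_rele(db, FTAG);
 */
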
2528 boolean_t
2529 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2530 {
2531 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2532 dbuf_dirty_record_t *dr;
2533
2534 mutex_enter(&db->db_mtx);
2535 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2536 mutex_exit(&db->db_mtx);
2537 return (dr != NULL);
2538 }
2539
2540 void
2541 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2542 {
2543 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2544
2545 db->db_state = DB_NOFILL;
2546 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2547 dmu_buf_will_fill(db_fake, tx);
2548 }
2549
2550 void
2551 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2552 {
2553 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2554
2555 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2556 ASSERT(tx->tx_txg != 0);
2557 ASSERT(db->db_level == 0);
2558 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2559
2560 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2561 dmu_tx_private_ok(tx));
2562
2563 dbuf_noread(db);
2564 (void) dbuf_dirty(db, tx);
2565 }
2566
2567 /*
2568 * This function is effectively the same as dmu_buf_will_dirty(), but
2569 * indicates the caller expects raw encrypted data in the db, and provides
2570 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2571 * blkptr_t when this dbuf is written. This is only used for blocks of
2572 * dnodes, during raw receive.
2573 */
2574 void
2575 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2576 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2577 {
2578 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2579 dbuf_dirty_record_t *dr;
2580
2581 /*
2582 * dr_has_raw_params is only processed for blocks of dnodes
2583 * (see dbuf_sync_dnode_leaf_crypt()).
2584 */
2585 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2586 ASSERT3U(db->db_level, ==, 0);
2587 ASSERT(db->db_objset->os_raw_receive);
2588
2589 dmu_buf_will_dirty_impl(db_fake,
2590 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2591
2592 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2593
2594 ASSERT3P(dr, !=, NULL);
2595
2596 dr->dt.dl.dr_has_raw_params = B_TRUE;
2597 dr->dt.dl.dr_byteorder = byteorder;
2598 bcopy(salt, dr->dt.dl.dr_salt, ZIO_DATA_SALT_LEN);
2599 bcopy(iv, dr->dt.dl.dr_iv, ZIO_DATA_IV_LEN);
2600 bcopy(mac, dr->dt.dl.dr_mac, ZIO_DATA_MAC_LEN);
2601 }
2602
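/*
 * Illustrative sketch (not part of the original file): during a raw
 * receive the crypt params arrive with the stream record and are
 * attached to the dirty record before the dnode block is written:
 *
 *	uint8_t salt[ZIO_DATA_SALT_LEN];
 *	uint8_t iv[ZIO_DATA_IV_LEN];
 *	uint8_t mac[ZIO_DATA_MAC_LEN];
 *	... copy salt/iv/mac out of the send stream record ...
 *	dmu_buf_set_crypt_params(db, byteorder, salt, iv, mac, tx);
 */
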
2603 static void
2604 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2605 {
2606 struct dirty_leaf *dl;
2607 dbuf_dirty_record_t *dr;
2608
2609 dr = list_head(&db->db_dirty_records);
2610 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2611 dl = &dr->dt.dl;
2612 dl->dr_overridden_by = *bp;
2613 dl->dr_override_state = DR_OVERRIDDEN;
2614 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2615 }
2616
2617 /* ARGSUSED */
2618 void
2619 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2620 {
2621 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2622 dbuf_states_t old_state;
2623 mutex_enter(&db->db_mtx);
2624 DBUF_VERIFY(db);
2625
2626 old_state = db->db_state;
2627 db->db_state = DB_CACHED;
2628 if (old_state == DB_FILL) {
2629 if (db->db_level == 0 && db->db_freed_in_flight) {
2630 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2631 /* we were freed while filling */
2632 /* XXX dbuf_undirty? */
2633 bzero(db->db.db_data, db->db.db_size);
2634 db->db_freed_in_flight = FALSE;
2635 DTRACE_SET_STATE(db,
2636 "fill done handling freed in flight");
2637 } else {
2638 DTRACE_SET_STATE(db, "fill done");
2639 }
2640 cv_broadcast(&db->db_changed);
2641 }
2642 mutex_exit(&db->db_mtx);
2643 }
2644
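/*
 * Illustrative sketch (not part of the original file): will_fill and
 * fill_done bracket a whole-block overwrite so the old contents are
 * never read in from disk:
 *
 *	dmu_buf_will_fill(db, tx);
 *	... overwrite all of db->db_data ...
 *	dmu_buf_fill_done(db, tx);
 */
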
2645 void
2646 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2647 bp_embedded_type_t etype, enum zio_compress comp,
2648 int uncompressed_size, int compressed_size, int byteorder,
2649 dmu_tx_t *tx)
2650 {
2651 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2652 struct dirty_leaf *dl;
2653 dmu_object_type_t type;
2654 dbuf_dirty_record_t *dr;
2655
2656 if (etype == BP_EMBEDDED_TYPE_DATA) {
2657 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2658 SPA_FEATURE_EMBEDDED_DATA));
2659 }
2660
2661 DB_DNODE_ENTER(db);
2662 type = DB_DNODE(db)->dn_type;
2663 DB_DNODE_EXIT(db);
2664
2665 ASSERT0(db->db_level);
2666 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2667
2668 dmu_buf_will_not_fill(dbuf, tx);
2669
2670 dr = list_head(&db->db_dirty_records);
2671 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2672 dl = &dr->dt.dl;
2673 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2674 data, comp, uncompressed_size, compressed_size);
2675 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2676 BP_SET_TYPE(&dl->dr_overridden_by, type);
2677 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2678 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2679
2680 dl->dr_override_state = DR_OVERRIDDEN;
2681 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2682 }
2683
2684 void
2685 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2686 {
2687 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2688 dmu_object_type_t type;
2689 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2690 SPA_FEATURE_REDACTED_DATASETS));
2691
2692 DB_DNODE_ENTER(db);
2693 type = DB_DNODE(db)->dn_type;
2694 DB_DNODE_EXIT(db);
2695
2696 ASSERT0(db->db_level);
2697 dmu_buf_will_not_fill(dbuf, tx);
2698
2699 blkptr_t bp = { { { {0} } } };
2700 BP_SET_TYPE(&bp, type);
2701 BP_SET_LEVEL(&bp, 0);
2702 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2703 BP_SET_REDACTED(&bp);
2704 BPE_SET_LSIZE(&bp, dbuf->db_size);
2705
2706 dbuf_override_impl(db, &bp, tx);
2707 }
2708
2709 /*
2710 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2711 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2712 */
2713 void
2714 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2715 {
2716 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2717 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2718 ASSERT(db->db_level == 0);
2719 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2720 ASSERT(buf != NULL);
2721 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2722 ASSERT(tx->tx_txg != 0);
2723
2724 arc_return_buf(buf, db);
2725 ASSERT(arc_released(buf));
2726
2727 mutex_enter(&db->db_mtx);
2728
2729 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2730 cv_wait(&db->db_changed, &db->db_mtx);
2731
2732 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2733
2734 if (db->db_state == DB_CACHED &&
2735 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2736 /*
2737 * In practice, we will never have a case where we have an
2738 * encrypted arc buffer while additional holds exist on the
2739 * dbuf. We don't handle this here so we simply assert that
2740 * fact instead.
2741 */
2742 ASSERT(!arc_is_encrypted(buf));
2743 mutex_exit(&db->db_mtx);
2744 (void) dbuf_dirty(db, tx);
2745 bcopy(buf->b_data, db->db.db_data, db->db.db_size);
2746 arc_buf_destroy(buf, db);
2747 return;
2748 }
2749
2750 if (db->db_state == DB_CACHED) {
2751 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2752
2753 ASSERT(db->db_buf != NULL);
2754 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2755 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2756
2757 if (!arc_released(db->db_buf)) {
2758 ASSERT(dr->dt.dl.dr_override_state ==
2759 DR_OVERRIDDEN);
2760 arc_release(db->db_buf, db);
2761 }
2762 dr->dt.dl.dr_data = buf;
2763 arc_buf_destroy(db->db_buf, db);
2764 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2765 arc_release(db->db_buf, db);
2766 arc_buf_destroy(db->db_buf, db);
2767 }
2768 db->db_buf = NULL;
2769 }
2770 ASSERT(db->db_buf == NULL);
2771 dbuf_set_data(db, buf);
2772 db->db_state = DB_FILL;
2773 DTRACE_SET_STATE(db, "filling assigned arcbuf");
2774 mutex_exit(&db->db_mtx);
2775 (void) dbuf_dirty(db, tx);
2776 dmu_buf_fill_done(&db->db, tx);
2777 }
2778
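/*
 * Illustrative sketch (not part of the original file): the zero-copy
 * write path that lands here typically loans a buffer from the ARC,
 * fills it, and hands it back via the dmu_assign_arcbuf* wrappers:
 *
 *	arc_buf_t *buf = dmu_request_arcbuf(&db->db, db->db.db_size);
 *	... fill buf->b_data ...
 *	(void) dmu_assign_arcbuf_by_dbuf(&db->db, offset, buf, tx);
 *
 * When other holds exist, the contents are copied rather than assigned,
 * as handled above.
 */
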
2779 void
2780 dbuf_destroy(dmu_buf_impl_t *db)
2781 {
2782 dnode_t *dn;
2783 dmu_buf_impl_t *parent = db->db_parent;
2784 dmu_buf_impl_t *dndb;
2785
2786 ASSERT(MUTEX_HELD(&db->db_mtx));
2787 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2788
2789 if (db->db_buf != NULL) {
2790 arc_buf_destroy(db->db_buf, db);
2791 db->db_buf = NULL;
2792 }
2793
2794 if (db->db_blkid == DMU_BONUS_BLKID) {
2795 int slots = DB_DNODE(db)->dn_num_slots;
2796 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2797 if (db->db.db_data != NULL) {
2798 kmem_free(db->db.db_data, bonuslen);
2799 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2800 db->db_state = DB_UNCACHED;
2801 DTRACE_SET_STATE(db, "buffer cleared");
2802 }
2803 }
2804
2805 dbuf_clear_data(db);
2806
2807 if (multilist_link_active(&db->db_cache_link)) {
2808 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2809 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2810
2811 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
2812 (void) zfs_refcount_remove_many(
2813 &dbuf_caches[db->db_caching_status].size,
2814 db->db.db_size, db);
2815
2816 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2817 DBUF_STAT_BUMPDOWN(metadata_cache_count);
2818 } else {
2819 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2820 DBUF_STAT_BUMPDOWN(cache_count);
2821 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2822 db->db.db_size);
2823 }
2824 db->db_caching_status = DB_NO_CACHE;
2825 }
2826
2827 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2828 ASSERT(db->db_data_pending == NULL);
2829 ASSERT(list_is_empty(&db->db_dirty_records));
2830
2831 db->db_state = DB_EVICTING;
2832 DTRACE_SET_STATE(db, "buffer eviction started");
2833 db->db_blkptr = NULL;
2834
2835 /*
2836 * Now that db_state is DB_EVICTING, nobody else can find this via
2837 * the hash table. We can now drop db_mtx, which allows us to
2838 * acquire the dn_dbufs_mtx.
2839 */
2840 mutex_exit(&db->db_mtx);
2841
2842 DB_DNODE_ENTER(db);
2843 dn = DB_DNODE(db);
2844 dndb = dn->dn_dbuf;
2845 if (db->db_blkid != DMU_BONUS_BLKID) {
2846 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2847 if (needlock)
2848 mutex_enter_nested(&dn->dn_dbufs_mtx,
2849 NESTED_SINGLE);
2850 avl_remove(&dn->dn_dbufs, db);
2851 membar_producer();
2852 DB_DNODE_EXIT(db);
2853 if (needlock)
2854 mutex_exit(&dn->dn_dbufs_mtx);
2855 /*
2856 * Decrementing the dbuf count means that the hold corresponding
2857 * to the removed dbuf is no longer discounted in dnode_move(),
2858 * so the dnode cannot be moved until after we release the hold.
2859 * The membar_producer() ensures visibility of the decremented
2860 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2861 * release any lock.
2862 */
2863 mutex_enter(&dn->dn_mtx);
2864 dnode_rele_and_unlock(dn, db, B_TRUE);
2865 db->db_dnode_handle = NULL;
2866
2867 dbuf_hash_remove(db);
2868 } else {
2869 DB_DNODE_EXIT(db);
2870 }
2871
2872 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2873
2874 db->db_parent = NULL;
2875
2876 ASSERT(db->db_buf == NULL);
2877 ASSERT(db->db.db_data == NULL);
2878 ASSERT(db->db_hash_next == NULL);
2879 ASSERT(db->db_blkptr == NULL);
2880 ASSERT(db->db_data_pending == NULL);
2881 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
2882 ASSERT(!multilist_link_active(&db->db_cache_link));
2883
2884 kmem_cache_free(dbuf_kmem_cache, db);
2885 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
2886
2887 /*
2888 * If this dbuf is referenced from an indirect dbuf,
2889 * decrement the ref count on the indirect dbuf.
2890 */
2891 if (parent && parent != dndb) {
2892 mutex_enter(&parent->db_mtx);
2893 dbuf_rele_and_unlock(parent, db, B_TRUE);
2894 }
2895 }
2896
2897 /*
2898 * Note: While bpp will always be updated if the function returns success,
2899 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
2900 * this happens when the dnode is the meta-dnode, or {user|group|project}used
2901 * object.
2902 */
2903 __attribute__((always_inline))
2904 static inline int
2905 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
2906 dmu_buf_impl_t **parentp, blkptr_t **bpp)
2907 {
2908 *parentp = NULL;
2909 *bpp = NULL;
2910
2911 ASSERT(blkid != DMU_BONUS_BLKID);
2912
2913 if (blkid == DMU_SPILL_BLKID) {
2914 mutex_enter(&dn->dn_mtx);
2915 if (dn->dn_have_spill &&
2916 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
2917 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
2918 else
2919 *bpp = NULL;
2920 dbuf_add_ref(dn->dn_dbuf, NULL);
2921 *parentp = dn->dn_dbuf;
2922 mutex_exit(&dn->dn_mtx);
2923 return (0);
2924 }
2925
2926 int nlevels =
2927 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
2928 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2929
2930 ASSERT3U(level * epbs, <, 64);
2931 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
2932 /*
2933 * This assertion shouldn't trip as long as the max indirect block size
2934 * is less than 1M. The reason for this is that up to that point,
2935 * the number of levels required to address an entire object with blocks
2936 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
2937 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
2938 * (i.e. we can address the entire object), objects will all use at most
2939 * N-1 levels and the assertion won't overflow. However, once epbs is
2940 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
2941 * enough to address an entire object, so objects will have 5 levels,
2942 * but then this assertion will overflow.
2943 *
2944 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
2945 * need to redo this logic to handle overflows.
2946 */
2947 ASSERT(level >= nlevels ||
2948 ((nlevels - level - 1) * epbs) +
2949 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
2950 if (level >= nlevels ||
2951 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
2952 ((nlevels - level - 1) * epbs)) ||
2953 (fail_sparse &&
2954 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
2955 /* the buffer has no parent yet */
2956 return (SET_ERROR(ENOENT));
2957 } else if (level < nlevels-1) {
2958 /* this block is referenced from an indirect block */
2959 int err;
2960
2961 err = dbuf_hold_impl(dn, level + 1,
2962 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
2963
2964 if (err)
2965 return (err);
2966 err = dbuf_read(*parentp, NULL,
2967 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2968 if (err) {
2969 dbuf_rele(*parentp, NULL);
2970 *parentp = NULL;
2971 return (err);
2972 }
2973 rw_enter(&(*parentp)->db_rwlock, RW_READER);
2974 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
2975 (blkid & ((1ULL << epbs) - 1));
2976 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
2977 ASSERT(BP_IS_HOLE(*bpp));
2978 rw_exit(&(*parentp)->db_rwlock);
2979 return (0);
2980 } else {
2981 /* the block is referenced from the dnode */
2982 ASSERT3U(level, ==, nlevels-1);
2983 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
2984 blkid < dn->dn_phys->dn_nblkptr);
2985 if (dn->dn_dbuf) {
2986 dbuf_add_ref(dn->dn_dbuf, NULL);
2987 *parentp = dn->dn_dbuf;
2988 }
2989 *bpp = &dn->dn_phys->dn_blkptr[blkid];
2990 return (0);
2991 }
2992 }
2993
2994 static dmu_buf_impl_t *
2995 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
2996 dmu_buf_impl_t *parent, blkptr_t *blkptr)
2997 {
2998 objset_t *os = dn->dn_objset;
2999 dmu_buf_impl_t *db, *odb;
3000
3001 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3002 ASSERT(dn->dn_type != DMU_OT_NONE);
3003
3004 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3005
3006 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3007 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3008
3009 db->db_objset = os;
3010 db->db.db_object = dn->dn_object;
3011 db->db_level = level;
3012 db->db_blkid = blkid;
3013 db->db_dirtycnt = 0;
3014 db->db_dnode_handle = dn->dn_handle;
3015 db->db_parent = parent;
3016 db->db_blkptr = blkptr;
3017
3018 db->db_user = NULL;
3019 db->db_user_immediate_evict = FALSE;
3020 db->db_freed_in_flight = FALSE;
3021 db->db_pending_evict = FALSE;
3022
3023 if (blkid == DMU_BONUS_BLKID) {
3024 ASSERT3P(parent, ==, dn->dn_dbuf);
3025 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3026 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3027 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3028 db->db.db_offset = DMU_BONUS_BLKID;
3029 db->db_state = DB_UNCACHED;
3030 DTRACE_SET_STATE(db, "bonus buffer created");
3031 db->db_caching_status = DB_NO_CACHE;
3032 /* the bonus dbuf is not placed in the hash table */
3033 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3034 return (db);
3035 } else if (blkid == DMU_SPILL_BLKID) {
3036 db->db.db_size = (blkptr != NULL) ?
3037 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3038 db->db.db_offset = 0;
3039 } else {
3040 int blocksize =
3041 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3042 db->db.db_size = blocksize;
3043 db->db.db_offset = db->db_blkid * blocksize;
3044 }
3045
3046 /*
3047 * Hold the dn_dbufs_mtx while we get the new dbuf
3048 * in the hash table *and* added to the dbufs list.
3049 * This prevents a possible deadlock with someone
3050 * trying to look up this dbuf before it's added to the
3051 * dn_dbufs list.
3052 */
3053 mutex_enter(&dn->dn_dbufs_mtx);
3054 db->db_state = DB_EVICTING; /* not worth logging this state change */
3055 if ((odb = dbuf_hash_insert(db)) != NULL) {
3056 /* someone else inserted it first */
3057 kmem_cache_free(dbuf_kmem_cache, db);
3058 mutex_exit(&dn->dn_dbufs_mtx);
3059 DBUF_STAT_BUMP(hash_insert_race);
3060 return (odb);
3061 }
3062 avl_add(&dn->dn_dbufs, db);
3063
3064 db->db_state = DB_UNCACHED;
3065 DTRACE_SET_STATE(db, "regular buffer created");
3066 db->db_caching_status = DB_NO_CACHE;
3067 mutex_exit(&dn->dn_dbufs_mtx);
3068 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3069
3070 if (parent && parent != dn->dn_dbuf)
3071 dbuf_add_ref(parent, db);
3072
3073 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3074 zfs_refcount_count(&dn->dn_holds) > 0);
3075 (void) zfs_refcount_add(&dn->dn_holds, db);
3076
3077 dprintf_dbuf(db, "db=%p\n", db);
3078
3079 return (db);
3080 }
3081
3082 /*
3083 * This function returns a block pointer and information about the object,
3084 * given a dnode and a block. This is a publicly accessible version of
3085 * dbuf_findbp that only returns some information, rather than the
3086 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3087 * should be locked as (at least) a reader.
3088 */
3089 int
3090 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3091 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3092 {
3093 dmu_buf_impl_t *dbp = NULL;
3094 blkptr_t *bp2;
3095 int err = 0;
3096 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3097
3098 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3099 if (err == 0) {
3100 *bp = *bp2;
3101 if (dbp != NULL)
3102 dbuf_rele(dbp, NULL);
3103 if (datablkszsec != NULL)
3104 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3105 if (indblkshift != NULL)
3106 *indblkshift = dn->dn_phys->dn_indblkshift;
3107 }
3108
3109 return (err);
3110 }
3111
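/*
 * Illustrative sketch (not part of the original file): per the contract
 * above, the dnode must be held and dn_struct_rwlock locked as reader:
 *
 *	blkptr_t bp;
 *	uint16_t datablkszsec;
 *	uint8_t indblkshift;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
 *	    &datablkszsec, &indblkshift);
 *	rw_exit(&dn->dn_struct_rwlock);
 */
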
3112 typedef struct dbuf_prefetch_arg {
3113 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3114 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3115 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3116 int dpa_curlevel; /* The current level that we're reading */
3117 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3118 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3119 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3120 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3121 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3122 void *dpa_arg; /* prefetch completion arg */
3123 } dbuf_prefetch_arg_t;
3124
3125 static void
3126 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3127 {
3128 if (dpa->dpa_cb != NULL)
3129 dpa->dpa_cb(dpa->dpa_arg, io_done);
3130 kmem_free(dpa, sizeof (*dpa));
3131 }
3132
3133 static void
3134 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3135 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3136 {
3137 dbuf_prefetch_arg_t *dpa = private;
3138
3139 dbuf_prefetch_fini(dpa, B_TRUE);
3140 if (abuf != NULL)
3141 arc_buf_destroy(abuf, private);
3142 }
3143
3144 /*
3145 * Actually issue the prefetch read for the block given.
3146 */
3147 static void
3148 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3149 {
3150 ASSERT(!BP_IS_REDACTED(bp) ||
3151 dsl_dataset_feature_is_active(
3152 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3153 SPA_FEATURE_REDACTED_DATASETS));
3154
3155 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3156 return (dbuf_prefetch_fini(dpa, B_FALSE));
3157
3158 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3159 arc_flags_t aflags =
3160 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3161 ARC_FLAG_NO_BUF;
3162
3163 /* dnodes are always read as raw and then converted later */
3164 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3165 dpa->dpa_curlevel == 0)
3166 zio_flags |= ZIO_FLAG_RAW;
3167
3168 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3169 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3170 ASSERT(dpa->dpa_zio != NULL);
3171 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3172 dbuf_issue_final_prefetch_done, dpa,
3173 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3174 }
3175
3176 /*
3177 * Called when an indirect block above our prefetch target is read in. This
3178 * will either read in the next indirect block down the tree or issue the actual
3179 * prefetch if the next block down is our target.
3180 */
3181 static void
3182 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3183 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3184 {
3185 dbuf_prefetch_arg_t *dpa = private;
3186
3187 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3188 ASSERT3S(dpa->dpa_curlevel, >, 0);
3189
3190 if (abuf == NULL) {
3191 ASSERT(zio == NULL || zio->io_error != 0);
3192 return (dbuf_prefetch_fini(dpa, B_TRUE));
3193 }
3194 ASSERT(zio == NULL || zio->io_error == 0);
3195
3196 /*
3197 * The dpa_dnode is only valid if we are called with a NULL
3198 * zio. This indicates that the arc_read() returned without
3199 * first calling zio_read() to issue a physical read. Once
3200 * a physical read is made the dpa_dnode must be invalidated
3201 * as the locks guarding it may have been dropped. If the
3202 * dpa_dnode is still valid, then we want to add it to the dbuf
3203 * cache. To do so, we must hold the dbuf associated with the block
3204 * we just prefetched, read its contents so that we associate it
3205 * with an arc_buf_t, and then release it.
3206 */
3207 if (zio != NULL) {
3208 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3209 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3210 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3211 } else {
3212 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3213 }
3214 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3215
3216 dpa->dpa_dnode = NULL;
3217 } else if (dpa->dpa_dnode != NULL) {
3218 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3219 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3220 dpa->dpa_zb.zb_level));
3221 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3222 dpa->dpa_curlevel, curblkid, FTAG);
3223 if (db == NULL) {
3224 arc_buf_destroy(abuf, private);
3225 return (dbuf_prefetch_fini(dpa, B_TRUE));
3226 }
3227 (void) dbuf_read(db, NULL,
3228 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3229 dbuf_rele(db, FTAG);
3230 }
3231
3232 dpa->dpa_curlevel--;
3233 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3234 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3235 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3236 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3237
3238 ASSERT(!BP_IS_REDACTED(bp) ||
3239 dsl_dataset_feature_is_active(
3240 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3241 SPA_FEATURE_REDACTED_DATASETS));
3242 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3243 dbuf_prefetch_fini(dpa, B_TRUE);
3244 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3245 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3246 dbuf_issue_final_prefetch(dpa, bp);
3247 } else {
3248 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3249 zbookmark_phys_t zb;
3250
3251 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3252 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3253 iter_aflags |= ARC_FLAG_L2CACHE;
3254
3255 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3256
3257 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3258 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3259
3260 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3261 bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
3262 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3263 &iter_aflags, &zb);
3264 }
3265
3266 arc_buf_destroy(abuf, private);
3267 }
3268
3269 /*
3270 * Issue prefetch reads for the given block on the given level. If the indirect
3271 * blocks above that block are not in memory, we will read them in
3272 * asynchronously. As a result, this call never blocks waiting for a read to
3273 * complete. Note that the prefetch might fail if the dataset is encrypted and
3274 * the encryption key is unmapped before the IO completes.
3275 */
3276 int
3277 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3278 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3279 void *arg)
3280 {
3281 blkptr_t bp;
3282 int epbs, nlevels, curlevel;
3283 uint64_t curblkid;
3284
3285 ASSERT(blkid != DMU_BONUS_BLKID);
3286 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3287
3288 if (blkid > dn->dn_maxblkid)
3289 goto no_issue;
3290
3291 if (level == 0 && dnode_block_freed(dn, blkid))
3292 goto no_issue;
3293
3294 /*
3295 * This dnode hasn't been written to disk yet, so there's nothing to
3296 * prefetch.
3297 */
3298 nlevels = dn->dn_phys->dn_nlevels;
3299 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3300 goto no_issue;
3301
3302 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3303 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3304 goto no_issue;
3305
3306 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3307 level, blkid);
3308 if (db != NULL) {
3309 mutex_exit(&db->db_mtx);
3310 /*
3311 * This dbuf already exists. It is either CACHED, or
3312 * (we assume) about to be read or filled.
3313 */
3314 goto no_issue;
3315 }
3316
3317 /*
3318 * Find the closest ancestor (indirect block) of the target block
3319 * that is present in the cache. In this indirect block, we will
3320 * find the bp that is at curlevel, curblkid.
3321 */
3322 curlevel = level;
3323 curblkid = blkid;
3324 while (curlevel < nlevels - 1) {
3325 int parent_level = curlevel + 1;
3326 uint64_t parent_blkid = curblkid >> epbs;
3327 dmu_buf_impl_t *db;
3328
3329 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3330 FALSE, TRUE, FTAG, &db) == 0) {
3331 blkptr_t *bpp = db->db_buf->b_data;
3332 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3333 dbuf_rele(db, FTAG);
3334 break;
3335 }
3336
3337 curlevel = parent_level;
3338 curblkid = parent_blkid;
3339 }
3340
3341 if (curlevel == nlevels - 1) {
3342 /* No cached indirect blocks found. */
3343 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3344 bp = dn->dn_phys->dn_blkptr[curblkid];
3345 }
3346 ASSERT(!BP_IS_REDACTED(&bp) ||
3347 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3348 SPA_FEATURE_REDACTED_DATASETS));
3349 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3350 goto no_issue;
3351
3352 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3353
3354 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3355 ZIO_FLAG_CANFAIL);
3356
3357 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3358 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3359 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3360 dn->dn_object, level, blkid);
3361 dpa->dpa_curlevel = curlevel;
3362 dpa->dpa_prio = prio;
3363 dpa->dpa_aflags = aflags;
3364 dpa->dpa_spa = dn->dn_objset->os_spa;
3365 dpa->dpa_dnode = dn;
3366 dpa->dpa_epbs = epbs;
3367 dpa->dpa_zio = pio;
3368 dpa->dpa_cb = cb;
3369 dpa->dpa_arg = arg;
3370
3371 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3372 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3373 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3374
3375 /*
3376 * If we have the indirect just above us, no need to do the asynchronous
3377 * prefetch chain; we'll just run the last step ourselves. If we're at
3378 * a higher level, though, we want to issue the prefetches for all the
3379 * indirect blocks asynchronously, so we can go on with whatever we were
3380 * doing.
3381 */
3382 if (curlevel == level) {
3383 ASSERT3U(curblkid, ==, blkid);
3384 dbuf_issue_final_prefetch(dpa, &bp);
3385 } else {
3386 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3387 zbookmark_phys_t zb;
3388
3389 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3390 if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
3391 iter_aflags |= ARC_FLAG_L2CACHE;
3392
3393 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3394 dn->dn_object, curlevel, curblkid);
3395 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3396 &bp, dbuf_prefetch_indirect_done, dpa, prio,
3397 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3398 &iter_aflags, &zb);
3399 }
3400 /*
3401 * We use pio here instead of dpa_zio since it's possible that
3402 * dpa may have already been freed.
3403 */
3404 zio_nowait(pio);
3405 return (1);
3406 no_issue:
3407 if (cb != NULL)
3408 cb(arg, B_FALSE);
3409 return (0);
3410 }
3411
3412 int
3413 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3414 arc_flags_t aflags)
3415 {
3416
3417 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3418 }
3419
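/*
 * Illustrative sketch (not part of the original file): issuing an
 * asynchronous level-0 prefetch.  The return value is 1 if a prefetch
 * was issued and 0 otherwise; nothing here blocks on the read:
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	(void) dbuf_prefetch(dn, 0, blkid, ZIO_PRIORITY_ASYNC_READ, 0);
 *	rw_exit(&dn->dn_struct_rwlock);
 */
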
3420 /*
3421 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3422 * the case of encrypted, compressed and uncompressed buffers by
3423 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3424 * arc_alloc_compressed_buf() or arc_alloc_buf().
3425 *
3426 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3427 */
3428 noinline static void
3429 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3430 {
3431 dbuf_dirty_record_t *dr = db->db_data_pending;
3432 arc_buf_t *data = dr->dt.dl.dr_data;
3433 enum zio_compress compress_type = arc_get_compression(data);
3434 uint8_t complevel = arc_get_complevel(data);
3435
3436 if (arc_is_encrypted(data)) {
3437 boolean_t byteorder;
3438 uint8_t salt[ZIO_DATA_SALT_LEN];
3439 uint8_t iv[ZIO_DATA_IV_LEN];
3440 uint8_t mac[ZIO_DATA_MAC_LEN];
3441
3442 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3443 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3444 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3445 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3446 compress_type, complevel));
3447 } else if (compress_type != ZIO_COMPRESS_OFF) {
3448 dbuf_set_data(db, arc_alloc_compressed_buf(
3449 dn->dn_objset->os_spa, db, arc_buf_size(data),
3450 arc_buf_lsize(data), compress_type, complevel));
3451 } else {
3452 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3453 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3454 }
3455
3456 rw_enter(&db->db_rwlock, RW_WRITER);
3457 bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
3458 rw_exit(&db->db_rwlock);
3459 }
3460
3461 /*
3462 * Returns with db_holds incremented, and db_mtx not held.
3463 * Note: dn_struct_rwlock must be held.
3464 */
3465 int
3466 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3467 boolean_t fail_sparse, boolean_t fail_uncached,
3468 void *tag, dmu_buf_impl_t **dbp)
3469 {
3470 dmu_buf_impl_t *db, *parent = NULL;
3471
3472 /* If the pool has been created, verify the tx_sync_lock is not held */
3473 spa_t *spa = dn->dn_objset->os_spa;
3474 dsl_pool_t *dp = spa->spa_dsl_pool;
3475 if (dp != NULL) {
3476 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3477 }
3478
3479 ASSERT(blkid != DMU_BONUS_BLKID);
3480 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3481 ASSERT3U(dn->dn_nlevels, >, level);
3482
3483 *dbp = NULL;
3484
3485 /* dbuf_find() returns with db_mtx held */
3486 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid);
3487
3488 if (db == NULL) {
3489 blkptr_t *bp = NULL;
3490 int err;
3491
3492 if (fail_uncached)
3493 return (SET_ERROR(ENOENT));
3494
3495 ASSERT3P(parent, ==, NULL);
3496 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3497 if (fail_sparse) {
3498 if (err == 0 && bp && BP_IS_HOLE(bp))
3499 err = SET_ERROR(ENOENT);
3500 if (err) {
3501 if (parent)
3502 dbuf_rele(parent, NULL);
3503 return (err);
3504 }
3505 }
3506 if (err && err != ENOENT)
3507 return (err);
3508 db = dbuf_create(dn, level, blkid, parent, bp);
3509 }
3510
3511 if (fail_uncached && db->db_state != DB_CACHED) {
3512 mutex_exit(&db->db_mtx);
3513 return (SET_ERROR(ENOENT));
3514 }
3515
3516 if (db->db_buf != NULL) {
3517 arc_buf_access(db->db_buf);
3518 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3519 }
3520
3521 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3522
3523 /*
3524 * If this buffer is currently syncing out, and we are
3525 * still referencing it from db_data, we need to make a copy
3526 * of it in case we decide we want to dirty it again in this txg.
3527 */
3528 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3529 dn->dn_object != DMU_META_DNODE_OBJECT &&
3530 db->db_state == DB_CACHED && db->db_data_pending) {
3531 dbuf_dirty_record_t *dr = db->db_data_pending;
3532 if (dr->dt.dl.dr_data == db->db_buf)
3533 dbuf_hold_copy(dn, db);
3534 }
3535
3536 if (multilist_link_active(&db->db_cache_link)) {
3537 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3538 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3539 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3540
3541 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3542 (void) zfs_refcount_remove_many(
3543 &dbuf_caches[db->db_caching_status].size,
3544 db->db.db_size, db);
3545
3546 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3547 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3548 } else {
3549 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3550 DBUF_STAT_BUMPDOWN(cache_count);
3551 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3552 db->db.db_size);
3553 }
3554 db->db_caching_status = DB_NO_CACHE;
3555 }
3556 (void) zfs_refcount_add(&db->db_holds, tag);
3557 DBUF_VERIFY(db);
3558 mutex_exit(&db->db_mtx);
3559
3560 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3561 if (parent)
3562 dbuf_rele(parent, NULL);
3563
3564 ASSERT3P(DB_DNODE(db), ==, dn);
3565 ASSERT3U(db->db_blkid, ==, blkid);
3566 ASSERT3U(db->db_level, ==, level);
3567 *dbp = db;
3568
3569 return (0);
3570 }
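
/*
 * Illustrative sketch (assumed caller, not part of the original file)
 * of the canonical hold/release pattern around dbuf_hold_impl():
 *
 *	dmu_buf_impl_t *db;
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
 *	rw_exit(&dn->dn_struct_rwlock);
 *	if (err == 0) {
 *		... use db->db.db_data ...
 *		dbuf_rele(db, FTAG);
 *	}
 *
 * The struct rwlock is only needed to obtain the hold; the hold itself
 * keeps the dbuf alive until dbuf_rele().
 */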
3571
3572 dmu_buf_impl_t *
3573 dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
3574 {
3575 return (dbuf_hold_level(dn, 0, blkid, tag));
3576 }
3577
3578 dmu_buf_impl_t *
3579 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
3580 {
3581 dmu_buf_impl_t *db;
3582 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3583 return (err ? NULL : db);
3584 }
3585
3586 void
3587 dbuf_create_bonus(dnode_t *dn)
3588 {
3589 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3590
3591 ASSERT(dn->dn_bonus == NULL);
3592 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
3593 }
3594
3595 int
3596 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3597 {
3598 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3599
3600 if (db->db_blkid != DMU_SPILL_BLKID)
3601 return (SET_ERROR(ENOTSUP));
3602 if (blksz == 0)
3603 blksz = SPA_MINBLOCKSIZE;
3604 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3605 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3606
3607 dbuf_new_size(db, blksz, tx);
3608
3609 return (0);
3610 }
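
/*
 * Worked example of the sizing above (illustrative): with
 * SPA_MINBLOCKSIZE of 512, a requested spill size of 1000 bytes is
 * rounded up to P2ROUNDUP(1000, 512) == 1024, while a request of 0 is
 * first promoted to SPA_MINBLOCKSIZE itself.
 */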
3611
3612 void
3613 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3614 {
3615 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3616 }
3617
3618 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3619 void
3620 dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
3621 {
3622 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3623 VERIFY3S(holds, >, 1);
3624 }
3625
3626 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3627 boolean_t
3628 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3629 void *tag)
3630 {
3631 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3632 dmu_buf_impl_t *found_db;
3633 boolean_t result = B_FALSE;
3634
3635 if (blkid == DMU_BONUS_BLKID)
3636 found_db = dbuf_find_bonus(os, obj);
3637 else
3638 found_db = dbuf_find(os, obj, 0, blkid);
3639
3640 if (found_db != NULL) {
3641 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3642 (void) zfs_refcount_add(&db->db_holds, tag);
3643 result = B_TRUE;
3644 }
3645 mutex_exit(&found_db->db_mtx);
3646 }
3647 return (result);
3648 }
3649
3650 /*
3651 * If you call dbuf_rele() you had better not be referencing the dnode handle
3652 * unless you have some other direct or indirect hold on the dnode. (An indirect
3653 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3654 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3655 * dnode's parent dbuf evicting its dnode handles.
3656 */
3657 void
3658 dbuf_rele(dmu_buf_impl_t *db, void *tag)
3659 {
3660 mutex_enter(&db->db_mtx);
3661 dbuf_rele_and_unlock(db, tag, B_FALSE);
3662 }
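
/*
 * Illustrative sketch of the rule above (assumed caller): releasing
 * the bonus buffer last is safe because it is itself an indirect hold
 * on the dnode:
 *
 *	dbuf_rele(data_db, FTAG);   (dnode still pinned via bonus_db)
 *	dbuf_rele(bonus_db, FTAG);  (last indirect hold; the dnode's
 *	                             handles may now be evicted)
 */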
3663
3664 void
3665 dmu_buf_rele(dmu_buf_t *db, void *tag)
3666 {
3667 dbuf_rele((dmu_buf_impl_t *)db, tag);
3668 }
3669
3670 /*
3671 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3672 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3673 * argument should be set if we are already in the dbuf-evicting code
3674 * path, in which case we don't want to recursively evict. This allows us to
3675 * avoid deeply nested stacks that would have a call flow similar to this:
3676 *
3677 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3678 * ^ |
3679 * | |
3680 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3681 *
3682 */
3683 void
3684 dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag, boolean_t evicting)
3685 {
3686 int64_t holds;
3687 uint64_t size;
3688
3689 ASSERT(MUTEX_HELD(&db->db_mtx));
3690 DBUF_VERIFY(db);
3691
3692 /*
3693 * Remove the reference to the dbuf before removing its hold on the
3694 * dnode so we can guarantee in dnode_move() that a referenced bonus
3695 * buffer has a corresponding dnode hold.
3696 */
3697 holds = zfs_refcount_remove(&db->db_holds, tag);
3698 ASSERT(holds >= 0);
3699
3700 /*
3701 * We can't freeze indirects if there is a possibility that they
3702 * may be modified in the current syncing context.
3703 */
3704 if (db->db_buf != NULL &&
3705 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3706 arc_buf_freeze(db->db_buf);
3707 }
3708
3709 if (holds == db->db_dirtycnt &&
3710 db->db_level == 0 && db->db_user_immediate_evict)
3711 dbuf_evict_user(db);
3712
3713 if (holds == 0) {
3714 if (db->db_blkid == DMU_BONUS_BLKID) {
3715 dnode_t *dn;
3716 boolean_t evict_dbuf = db->db_pending_evict;
3717
3718 /*
3719 * If the dnode moves here, we cannot cross this
3720 * barrier until the move completes.
3721 */
3722 DB_DNODE_ENTER(db);
3723
3724 dn = DB_DNODE(db);
3725 atomic_dec_32(&dn->dn_dbufs_count);
3726
3727 /*
3728 * Decrementing the dbuf count means that the bonus
3729 * buffer's dnode hold is no longer discounted in
3730 * dnode_move(). The dnode cannot move until after
3731 * the dnode_rele() below.
3732 */
3733 DB_DNODE_EXIT(db);
3734
3735 /*
3736 * Do not reference db after its lock is dropped.
3737 * Another thread may evict it.
3738 */
3739 mutex_exit(&db->db_mtx);
3740
3741 if (evict_dbuf)
3742 dnode_evict_bonus(dn);
3743
3744 dnode_rele(dn, db);
3745 } else if (db->db_buf == NULL) {
3746 /*
3747 * This is a special case: we never associated this
3748 * dbuf with any data allocated from the ARC.
3749 */
3750 ASSERT(db->db_state == DB_UNCACHED ||
3751 db->db_state == DB_NOFILL);
3752 dbuf_destroy(db);
3753 } else if (arc_released(db->db_buf)) {
3754 /*
3755 * This dbuf has anonymous data associated with it.
3756 */
3757 dbuf_destroy(db);
3758 } else {
3759 boolean_t do_arc_evict = B_FALSE;
3760 blkptr_t bp;
3761 spa_t *spa = dmu_objset_spa(db->db_objset);
3762
3763 if (!DBUF_IS_CACHEABLE(db) &&
3764 db->db_blkptr != NULL &&
3765 !BP_IS_HOLE(db->db_blkptr) &&
3766 !BP_IS_EMBEDDED(db->db_blkptr)) {
3767 do_arc_evict = B_TRUE;
3768 bp = *db->db_blkptr;
3769 }
3770
3771 if (!DBUF_IS_CACHEABLE(db) ||
3772 db->db_pending_evict) {
3773 dbuf_destroy(db);
3774 } else if (!multilist_link_active(&db->db_cache_link)) {
3775 ASSERT3U(db->db_caching_status, ==,
3776 DB_NO_CACHE);
3777
3778 dbuf_cached_state_t dcs =
3779 dbuf_include_in_metadata_cache(db) ?
3780 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3781 db->db_caching_status = dcs;
3782
3783 multilist_insert(&dbuf_caches[dcs].cache, db);
3784 uint64_t db_size = db->db.db_size;
3785 size = zfs_refcount_add_many(
3786 &dbuf_caches[dcs].size, db_size, db);
3787 uint8_t db_level = db->db_level;
3788 mutex_exit(&db->db_mtx);
3789
3790 if (dcs == DB_DBUF_METADATA_CACHE) {
3791 DBUF_STAT_BUMP(metadata_cache_count);
3792 DBUF_STAT_MAX(
3793 metadata_cache_size_bytes_max,
3794 size);
3795 } else {
3796 DBUF_STAT_BUMP(cache_count);
3797 DBUF_STAT_MAX(cache_size_bytes_max,
3798 size);
3799 DBUF_STAT_BUMP(cache_levels[db_level]);
3800 DBUF_STAT_INCR(
3801 cache_levels_bytes[db_level],
3802 db_size);
3803 }
3804
3805 if (dcs == DB_DBUF_CACHE && !evicting)
3806 dbuf_evict_notify(size);
3807 }
3808
3809 if (do_arc_evict)
3810 arc_freed(spa, &bp);
3811 }
3812 } else {
3813 mutex_exit(&db->db_mtx);
3814 }
3816 }
3817
3818 #pragma weak dmu_buf_refcount = dbuf_refcount
3819 uint64_t
3820 dbuf_refcount(dmu_buf_impl_t *db)
3821 {
3822 return (zfs_refcount_count(&db->db_holds));
3823 }
3824
3825 uint64_t
3826 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3827 {
3828 uint64_t holds;
3829 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3830
3831 mutex_enter(&db->db_mtx);
3832 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3833 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3834 mutex_exit(&db->db_mtx);
3835
3836 return (holds);
3837 }
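
/*
 * Worked example (illustrative): with zfs_refcount_count(&db->db_holds)
 * == 3 and db->db_dirtycnt == 2, dmu_buf_user_refcount() reports 1,
 * since each dirty txg contributes one internal hold that should not
 * be attributed to the user.
 */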
3838
3839 void *
3840 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3841 dmu_buf_user_t *new_user)
3842 {
3843 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3844
3845 mutex_enter(&db->db_mtx);
3846 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3847 if (db->db_user == old_user)
3848 db->db_user = new_user;
3849 else
3850 old_user = db->db_user;
3851 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3852 mutex_exit(&db->db_mtx);
3853
3854 return (old_user);
3855 }
3856
3857 void *
3858 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3859 {
3860 return (dmu_buf_replace_user(db_fake, NULL, user));
3861 }
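
/*
 * Illustrative sketch (hypothetical consumer type, not part of the
 * original file): a typical user-data attachment embeds a
 * dmu_buf_user_t, initializes it, and registers it:
 *
 *	typedef struct my_user {
 *		dmu_buf_user_t mu_dbu;
 *		dmu_buf_t *mu_db;
 *	} my_user_t;
 *
 *	my_user_t *mu = kmem_zalloc(sizeof (*mu), KM_SLEEP);
 *	mu->mu_db = db;
 *	dmu_buf_init_user(&mu->mu_dbu, my_evict_sync, my_evict_async,
 *	    &mu->mu_db);
 *	VERIFY3P(dmu_buf_set_user(db, &mu->mu_dbu), ==, NULL);
 *
 * my_evict_sync() and my_evict_async() are hypothetical callbacks that
 * tear down my_user_t once the dbuf is evicted.
 */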
3862
3863 void *
3864 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3865 {
3866 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3867
3868 db->db_user_immediate_evict = TRUE;
3869 return (dmu_buf_set_user(db_fake, user));
3870 }
3871
3872 void *
3873 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3874 {
3875 return (dmu_buf_replace_user(db_fake, user, NULL));
3876 }
3877
3878 void *
3879 dmu_buf_get_user(dmu_buf_t *db_fake)
3880 {
3881 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3882
3883 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3884 return (db->db_user);
3885 }
3886
3887 void
3888 dmu_buf_user_evict_wait()
3889 {
3890 taskq_wait(dbu_evict_taskq);
3891 }
3892
3893 blkptr_t *
3894 dmu_buf_get_blkptr(dmu_buf_t *db)
3895 {
3896 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3897 return (dbi->db_blkptr);
3898 }
3899
3900 objset_t *
3901 dmu_buf_get_objset(dmu_buf_t *db)
3902 {
3903 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3904 return (dbi->db_objset);
3905 }
3906
3907 dnode_t *
3908 dmu_buf_dnode_enter(dmu_buf_t *db)
3909 {
3910 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3911 DB_DNODE_ENTER(dbi);
3912 return (DB_DNODE(dbi));
3913 }
3914
3915 void
3916 dmu_buf_dnode_exit(dmu_buf_t *db)
3917 {
3918 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
3919 DB_DNODE_EXIT(dbi);
3920 }
3921
3922 static void
3923 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
3924 {
3925 	/* ASSERT(dmu_tx_is_syncing(tx)) */
3926 ASSERT(MUTEX_HELD(&db->db_mtx));
3927
3928 if (db->db_blkptr != NULL)
3929 return;
3930
3931 if (db->db_blkid == DMU_SPILL_BLKID) {
3932 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
3933 BP_ZERO(db->db_blkptr);
3934 return;
3935 }
3936 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
3937 /*
3938 		 * This buffer was allocated at a time when there were
3939 		 * no blkptrs available from the dnode, or it was
3940 * inappropriate to hook it in (i.e., nlevels mismatch).
3941 */
3942 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
3943 ASSERT(db->db_parent == NULL);
3944 db->db_parent = dn->dn_dbuf;
3945 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
3946 DBUF_VERIFY(db);
3947 } else {
3948 dmu_buf_impl_t *parent = db->db_parent;
3949 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3950
3951 ASSERT(dn->dn_phys->dn_nlevels > 1);
3952 if (parent == NULL) {
3953 mutex_exit(&db->db_mtx);
3954 rw_enter(&dn->dn_struct_rwlock, RW_READER);
3955 parent = dbuf_hold_level(dn, db->db_level + 1,
3956 db->db_blkid >> epbs, db);
3957 rw_exit(&dn->dn_struct_rwlock);
3958 mutex_enter(&db->db_mtx);
3959 db->db_parent = parent;
3960 }
3961 db->db_blkptr = (blkptr_t *)parent->db.db_data +
3962 (db->db_blkid & ((1ULL << epbs) - 1));
3963 DBUF_VERIFY(db);
3964 }
3965 }
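
/*
 * Worked example of the epbs arithmetic above (illustrative): with the
 * default 128K indirect block size, dn_indblkshift == 17 and
 * SPA_BLKPTRSHIFT == 7, so epbs == 10 and each indirect block holds
 * 1024 block pointers. A level-0 dbuf with db_blkid == 5000 then hangs
 * off parent blkid 5000 >> 10 == 4, at slot 5000 & 1023 == 904.
 */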
3966
3967 static void
3968 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
3969 {
3970 dmu_buf_impl_t *db = dr->dr_dbuf;
3971 void *data = dr->dt.dl.dr_data;
3972
3973 ASSERT0(db->db_level);
3974 ASSERT(MUTEX_HELD(&db->db_mtx));
3975 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
3976 ASSERT(data != NULL);
3977
3978 dnode_t *dn = dr->dr_dnode;
3979 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
3980 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
3981 bcopy(data, DN_BONUS(dn->dn_phys), DN_MAX_BONUS_LEN(dn->dn_phys));
3982
3983 dbuf_sync_leaf_verify_bonus_dnode(dr);
3984
3985 dbuf_undirty_bonus(dr);
3986 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
3987 }
3988
3989 /*
3990  * When syncing out a block of dnodes, adjust the block to deal with
3991 * encryption. Normally, we make sure the block is decrypted before writing
3992 * it. If we have crypt params, then we are writing a raw (encrypted) block,
3993 * from a raw receive. In this case, set the ARC buf's crypt params so
3994 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
3995 */
3996 static void
3997 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
3998 {
3999 int err;
4000 dmu_buf_impl_t *db = dr->dr_dbuf;
4001
4002 ASSERT(MUTEX_HELD(&db->db_mtx));
4003 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4004 ASSERT3U(db->db_level, ==, 0);
4005
4006 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4007 zbookmark_phys_t zb;
4008
4009 /*
4010 * Unfortunately, there is currently no mechanism for
4011 * syncing context to handle decryption errors. An error
4012 * here is only possible if an attacker maliciously
4013 * changed a dnode block and updated the associated
4014 * checksums going up the block tree.
4015 */
4016 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4017 db->db.db_object, db->db_level, db->db_blkid);
4018 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4019 &zb, B_TRUE);
4020 if (err)
4021 panic("Invalid dnode block MAC");
4022 } else if (dr->dt.dl.dr_has_raw_params) {
4023 (void) arc_release(dr->dt.dl.dr_data, db);
4024 arc_convert_to_raw(dr->dt.dl.dr_data,
4025 dmu_objset_id(db->db_objset),
4026 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4027 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4028 }
4029 }
4030
4031 /*
4032  * dbuf_sync_indirect() is called recursively from dbuf_sync_list(), so it
4033  * is critical that we not allow the compiler to inline this function into
4034  * dbuf_sync_list(), which would drastically bloat the stack usage.
4035 */
4036 noinline static void
4037 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4038 {
4039 dmu_buf_impl_t *db = dr->dr_dbuf;
4040 dnode_t *dn = dr->dr_dnode;
4041
4042 ASSERT(dmu_tx_is_syncing(tx));
4043
4044 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4045
4046 mutex_enter(&db->db_mtx);
4047
4048 ASSERT(db->db_level > 0);
4049 DBUF_VERIFY(db);
4050
4051 /* Read the block if it hasn't been read yet. */
4052 if (db->db_buf == NULL) {
4053 mutex_exit(&db->db_mtx);
4054 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4055 mutex_enter(&db->db_mtx);
4056 }
4057 ASSERT3U(db->db_state, ==, DB_CACHED);
4058 ASSERT(db->db_buf != NULL);
4059
4060 /* Indirect block size must match what the dnode thinks it is. */
4061 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4062 dbuf_check_blkptr(dn, db);
4063
4064 /* Provide the pending dirty record to child dbufs */
4065 db->db_data_pending = dr;
4066
4067 mutex_exit(&db->db_mtx);
4068
4069 dbuf_write(dr, db->db_buf, tx);
4070
4071 zio_t *zio = dr->dr_zio;
4072 mutex_enter(&dr->dt.di.dr_mtx);
4073 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4074 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4075 mutex_exit(&dr->dt.di.dr_mtx);
4076 zio_nowait(zio);
4077 }
4078
4079 /*
4080 * Verify that the size of the data in our bonus buffer does not exceed
4081 * its recorded size.
4082 *
4083 * The purpose of this verification is to catch any cases in development
4084  * where the size of a phys structure (e.g. space_map_phys_t) grows and,
4085 * due to incorrect feature management, older pools expect to read more
4086 * data even though they didn't actually write it to begin with.
4087 *
4088  * For example, this would catch an error in the feature logic where we
4089 * open an older pool and we expect to write the space map histogram of
4090 * a space map with size SPACE_MAP_SIZE_V0.
4091 */
4092 static void
4093 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4094 {
4095 #ifdef ZFS_DEBUG
4096 dnode_t *dn = dr->dr_dnode;
4097
4098 /*
4099 * Encrypted bonus buffers can have data past their bonuslen.
4100 * Skip the verification of these blocks.
4101 */
4102 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4103 return;
4104
4105 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4106 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4107 ASSERT3U(bonuslen, <=, maxbonuslen);
4108
4109 arc_buf_t *datap = dr->dt.dl.dr_data;
4110 char *datap_end = ((char *)datap) + bonuslen;
4111 char *datap_max = ((char *)datap) + maxbonuslen;
4112
4113 /* ensure that everything is zero after our data */
4114 for (; datap_end < datap_max; datap_end++)
4115 ASSERT(*datap_end == 0);
4116 #endif
4117 }
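
/*
 * Worked example (illustrative): for a single-slot 512-byte dnode,
 * DN_SLOTS_TO_BONUSLEN(1) == 320, so with dn_bonuslen == 64 the loop
 * above asserts that bytes [64, 320) of the bonus buffer are still
 * zero.
 */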
4118
4119 static blkptr_t *
4120 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4121 {
4122 /* This must be a lightweight dirty record. */
4123 ASSERT3P(dr->dr_dbuf, ==, NULL);
4124 dnode_t *dn = dr->dr_dnode;
4125
4126 if (dn->dn_phys->dn_nlevels == 1) {
4127 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4128 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4129 } else {
4130 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4131 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4132 VERIFY3U(parent_db->db_level, ==, 1);
4133 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4134 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4135 blkptr_t *bp = parent_db->db.db_data;
4136 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4137 }
4138 }
4139
4140 static void
4141 dbuf_lightweight_ready(zio_t *zio)
4142 {
4143 dbuf_dirty_record_t *dr = zio->io_private;
4144 blkptr_t *bp = zio->io_bp;
4145
4146 if (zio->io_error != 0)
4147 return;
4148
4149 dnode_t *dn = dr->dr_dnode;
4150
4151 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4152 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4153 int64_t delta = bp_get_dsize_sync(spa, bp) -
4154 bp_get_dsize_sync(spa, bp_orig);
4155 dnode_diduse_space(dn, delta);
4156
4157 uint64_t blkid = dr->dt.dll.dr_blkid;
4158 mutex_enter(&dn->dn_mtx);
4159 if (blkid > dn->dn_phys->dn_maxblkid) {
4160 ASSERT0(dn->dn_objset->os_raw_receive);
4161 dn->dn_phys->dn_maxblkid = blkid;
4162 }
4163 mutex_exit(&dn->dn_mtx);
4164
4165 if (!BP_IS_EMBEDDED(bp)) {
4166 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4167 BP_SET_FILL(bp, fill);
4168 }
4169
4170 dmu_buf_impl_t *parent_db;
4171 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4172 if (dr->dr_parent == NULL) {
4173 parent_db = dn->dn_dbuf;
4174 } else {
4175 parent_db = dr->dr_parent->dr_dbuf;
4176 }
4177 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4178 *bp_orig = *bp;
4179 rw_exit(&parent_db->db_rwlock);
4180 }
4181
4182 static void
4183 dbuf_lightweight_physdone(zio_t *zio)
4184 {
4185 dbuf_dirty_record_t *dr = zio->io_private;
4186 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4187 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4188
4189 /*
4190 * The callback will be called io_phys_children times. Retire one
4191 * portion of our dirty space each time we are called. Any rounding
4192 * error will be cleaned up by dbuf_lightweight_done().
4193 */
4194 int delta = dr->dr_accounted / zio->io_phys_children;
4195 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4196 }
4197
4198 static void
4199 dbuf_lightweight_done(zio_t *zio)
4200 {
4201 dbuf_dirty_record_t *dr = zio->io_private;
4202
4203 VERIFY0(zio->io_error);
4204
4205 objset_t *os = dr->dr_dnode->dn_objset;
4206 dmu_tx_t *tx = os->os_synctx;
4207
4208 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4209 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4210 } else {
4211 dsl_dataset_t *ds = os->os_dsl_dataset;
4212 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4213 dsl_dataset_block_born(ds, zio->io_bp, tx);
4214 }
4215
4216 /*
4217 * See comment in dbuf_write_done().
4218 */
4219 if (zio->io_phys_children == 0) {
4220 dsl_pool_undirty_space(dmu_objset_pool(os),
4221 dr->dr_accounted, zio->io_txg);
4222 } else {
4223 dsl_pool_undirty_space(dmu_objset_pool(os),
4224 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4225 }
4226
4227 abd_free(dr->dt.dll.dr_abd);
4228 kmem_free(dr, sizeof (*dr));
4229 }
4230
4231 noinline static void
4232 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4233 {
4234 dnode_t *dn = dr->dr_dnode;
4235 zio_t *pio;
4236 if (dn->dn_phys->dn_nlevels == 1) {
4237 pio = dn->dn_zio;
4238 } else {
4239 pio = dr->dr_parent->dr_zio;
4240 }
4241
4242 zbookmark_phys_t zb = {
4243 .zb_objset = dmu_objset_id(dn->dn_objset),
4244 .zb_object = dn->dn_object,
4245 .zb_level = 0,
4246 .zb_blkid = dr->dt.dll.dr_blkid,
4247 };
4248
4249 /*
4250 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4251 * will have the old BP in dbuf_lightweight_done().
4252 */
4253 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4254
4255 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4256 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4257 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4258 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4259 dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4260 ZIO_PRIORITY_ASYNC_WRITE,
4261 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4262
4263 zio_nowait(dr->dr_zio);
4264 }
4265
4266 /*
4267  * dbuf_sync_leaf() is called recursively from dbuf_sync_list(), so it is
4268  * critical that we not allow the compiler to inline this function into
4269  * dbuf_sync_list(), which would drastically bloat the stack usage.
4270 */
4271 noinline static void
4272 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4273 {
4274 arc_buf_t **datap = &dr->dt.dl.dr_data;
4275 dmu_buf_impl_t *db = dr->dr_dbuf;
4276 dnode_t *dn = dr->dr_dnode;
4277 objset_t *os;
4278 uint64_t txg = tx->tx_txg;
4279
4280 ASSERT(dmu_tx_is_syncing(tx));
4281
4282 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4283
4284 mutex_enter(&db->db_mtx);
4285 /*
4286 * To be synced, we must be dirtied. But we
4287 * might have been freed after the dirty.
4288 */
4289 if (db->db_state == DB_UNCACHED) {
4290 /* This buffer has been freed since it was dirtied */
4291 ASSERT(db->db.db_data == NULL);
4292 } else if (db->db_state == DB_FILL) {
4293 /* This buffer was freed and is now being re-filled */
4294 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4295 } else {
4296 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4297 }
4298 DBUF_VERIFY(db);
4299
4300 if (db->db_blkid == DMU_SPILL_BLKID) {
4301 mutex_enter(&dn->dn_mtx);
4302 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4303 /*
4304 * In the previous transaction group, the bonus buffer
4305 * was entirely used to store the attributes for the
4306 * dnode which overrode the dn_spill field. However,
4307 * when adding more attributes to the file a spill
4308 * block was required to hold the extra attributes.
4309 *
4310 * Make sure to clear the garbage left in the dn_spill
4311 * field from the previous attributes in the bonus
4312 * buffer. Otherwise, after writing out the spill
4313 * block to the new allocated dva, it will free
4314 * the old block pointed to by the invalid dn_spill.
4315 */
4316 db->db_blkptr = NULL;
4317 }
4318 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4319 mutex_exit(&dn->dn_mtx);
4320 }
4321
4322 /*
4323 * If this is a bonus buffer, simply copy the bonus data into the
4324 * dnode. It will be written out when the dnode is synced (and it
4325 * will be synced, since it must have been dirty for dbuf_sync to
4326 * be called).
4327 */
4328 if (db->db_blkid == DMU_BONUS_BLKID) {
4329 ASSERT(dr->dr_dbuf == db);
4330 dbuf_sync_bonus(dr, tx);
4331 return;
4332 }
4333
4334 os = dn->dn_objset;
4335
4336 /*
4337 * This function may have dropped the db_mtx lock allowing a dmu_sync
4338 * operation to sneak in. As a result, we need to ensure that we
4339 * don't check the dr_override_state until we have returned from
4340 * dbuf_check_blkptr.
4341 */
4342 dbuf_check_blkptr(dn, db);
4343
4344 /*
4345 * If this buffer is in the middle of an immediate write,
4346 * wait for the synchronous IO to complete.
4347 */
4348 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4349 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4350 cv_wait(&db->db_changed, &db->db_mtx);
4351 ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
4352 }
4353
4354 /*
4355 * If this is a dnode block, ensure it is appropriately encrypted
4356 * or decrypted, depending on what we are writing to it this txg.
4357 */
4358 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4359 dbuf_prepare_encrypted_dnode_leaf(dr);
4360
4361 if (db->db_state != DB_NOFILL &&
4362 dn->dn_object != DMU_META_DNODE_OBJECT &&
4363 zfs_refcount_count(&db->db_holds) > 1 &&
4364 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4365 *datap == db->db_buf) {
4366 /*
4367 * If this buffer is currently "in use" (i.e., there
4368 * are active holds and db_data still references it),
4369 * then make a copy before we start the write so that
4370 * any modifications from the open txg will not leak
4371 * into this write.
4372 *
4373 * NOTE: this copy does not need to be made for
4374 * objects only modified in the syncing context (e.g.
4375 		 * DMU_OT_DNODE blocks).
4376 */
4377 int psize = arc_buf_size(*datap);
4378 int lsize = arc_buf_lsize(*datap);
4379 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4380 enum zio_compress compress_type = arc_get_compression(*datap);
4381 uint8_t complevel = arc_get_complevel(*datap);
4382
4383 if (arc_is_encrypted(*datap)) {
4384 boolean_t byteorder;
4385 uint8_t salt[ZIO_DATA_SALT_LEN];
4386 uint8_t iv[ZIO_DATA_IV_LEN];
4387 uint8_t mac[ZIO_DATA_MAC_LEN];
4388
4389 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4390 *datap = arc_alloc_raw_buf(os->os_spa, db,
4391 dmu_objset_id(os), byteorder, salt, iv, mac,
4392 dn->dn_type, psize, lsize, compress_type,
4393 complevel);
4394 } else if (compress_type != ZIO_COMPRESS_OFF) {
4395 ASSERT3U(type, ==, ARC_BUFC_DATA);
4396 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4397 psize, lsize, compress_type, complevel);
4398 } else {
4399 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4400 }
4401 bcopy(db->db.db_data, (*datap)->b_data, psize);
4402 }
4403 db->db_data_pending = dr;
4404
4405 mutex_exit(&db->db_mtx);
4406
4407 dbuf_write(dr, *datap, tx);
4408
4409 ASSERT(!list_link_active(&dr->dr_dirty_node));
4410 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4411 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4412 } else {
4413 zio_nowait(dr->dr_zio);
4414 }
4415 }
4416
4417 void
4418 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4419 {
4420 dbuf_dirty_record_t *dr;
4421
4422 while ((dr = list_head(list))) {
4423 if (dr->dr_zio != NULL) {
4424 /*
4425 * If we find an already initialized zio then we
4426 * are processing the meta-dnode, and we have finished.
4427 * The dbufs for all dnodes are put back on the list
4428 * during processing, so that we can zio_wait()
4429 * these IOs after initiating all child IOs.
4430 */
4431 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4432 DMU_META_DNODE_OBJECT);
4433 break;
4434 }
4435 list_remove(list, dr);
4436 if (dr->dr_dbuf == NULL) {
4437 dbuf_sync_lightweight(dr, tx);
4438 } else {
4439 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4440 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4441 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4442 }
4443 if (dr->dr_dbuf->db_level > 0)
4444 dbuf_sync_indirect(dr, tx);
4445 else
4446 dbuf_sync_leaf(dr, tx);
4447 }
4448 }
4449 }
4450
4451 /* ARGSUSED */
4452 static void
4453 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4454 {
4455 dmu_buf_impl_t *db = vdb;
4456 dnode_t *dn;
4457 blkptr_t *bp = zio->io_bp;
4458 blkptr_t *bp_orig = &zio->io_bp_orig;
4459 spa_t *spa = zio->io_spa;
4460 int64_t delta;
4461 uint64_t fill = 0;
4462 int i;
4463
4464 ASSERT3P(db->db_blkptr, !=, NULL);
4465 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4466
4467 DB_DNODE_ENTER(db);
4468 dn = DB_DNODE(db);
4469 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4470 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4471 zio->io_prev_space_delta = delta;
4472
4473 if (bp->blk_birth != 0) {
4474 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4475 BP_GET_TYPE(bp) == dn->dn_type) ||
4476 (db->db_blkid == DMU_SPILL_BLKID &&
4477 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4478 BP_IS_EMBEDDED(bp));
4479 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4480 }
4481
4482 mutex_enter(&db->db_mtx);
4483
4484 #ifdef ZFS_DEBUG
4485 if (db->db_blkid == DMU_SPILL_BLKID) {
4486 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4487 ASSERT(!(BP_IS_HOLE(bp)) &&
4488 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4489 }
4490 #endif
4491
4492 if (db->db_level == 0) {
4493 mutex_enter(&dn->dn_mtx);
4494 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4495 db->db_blkid != DMU_SPILL_BLKID) {
4496 ASSERT0(db->db_objset->os_raw_receive);
4497 dn->dn_phys->dn_maxblkid = db->db_blkid;
4498 }
4499 mutex_exit(&dn->dn_mtx);
4500
4501 if (dn->dn_type == DMU_OT_DNODE) {
4502 i = 0;
4503 while (i < db->db.db_size) {
4504 dnode_phys_t *dnp =
4505 (void *)(((char *)db->db.db_data) + i);
4506
4507 i += DNODE_MIN_SIZE;
4508 if (dnp->dn_type != DMU_OT_NONE) {
4509 fill++;
4510 i += dnp->dn_extra_slots *
4511 DNODE_MIN_SIZE;
4512 }
4513 }
4514 } else {
4515 if (BP_IS_HOLE(bp)) {
4516 fill = 0;
4517 } else {
4518 fill = 1;
4519 }
4520 }
4521 } else {
4522 blkptr_t *ibp = db->db.db_data;
4523 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4524 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4525 if (BP_IS_HOLE(ibp))
4526 continue;
4527 fill += BP_GET_FILL(ibp);
4528 }
4529 }
4530 DB_DNODE_EXIT(db);
4531
4532 if (!BP_IS_EMBEDDED(bp))
4533 BP_SET_FILL(bp, fill);
4534
4535 mutex_exit(&db->db_mtx);
4536
4537 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4538 *db->db_blkptr = *bp;
4539 dmu_buf_unlock_parent(db, dblt, FTAG);
4540 }
4541
4542 /* ARGSUSED */
4543 /*
4544 * This function gets called just prior to running through the compression
4545  * stage of the zio pipeline. If we're an indirect block composed of only
4546  * holes, then we want this indirect to be compressed away to a hole. In
4547  * order to do that we must zero out any information about the holes that
4548  * this indirect points to before we try to compress it.
4549 */
4550 static void
4551 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4552 {
4553 dmu_buf_impl_t *db = vdb;
4554 dnode_t *dn;
4555 blkptr_t *bp;
4556 unsigned int epbs, i;
4557
4558 ASSERT3U(db->db_level, >, 0);
4559 DB_DNODE_ENTER(db);
4560 dn = DB_DNODE(db);
4561 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4562 ASSERT3U(epbs, <, 31);
4563
4564 /* Determine if all our children are holes */
4565 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4566 if (!BP_IS_HOLE(bp))
4567 break;
4568 }
4569
4570 /*
4571 * If all the children are holes, then zero them all out so that
4572 * we may get compressed away.
4573 */
4574 if (i == 1ULL << epbs) {
4575 /*
4576 * We only found holes. Grab the rwlock to prevent
4577 * anybody from reading the blocks we're about to
4578 * zero out.
4579 */
4580 rw_enter(&db->db_rwlock, RW_WRITER);
4581 bzero(db->db.db_data, db->db.db_size);
4582 rw_exit(&db->db_rwlock);
4583 }
4584 DB_DNODE_EXIT(db);
4585 }
4586
4587 /*
4588 * The SPA will call this callback several times for each zio - once
4589 * for every physical child i/o (zio->io_phys_children times). This
4590 * allows the DMU to monitor the progress of each logical i/o. For example,
4591 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4592 * block. There may be a long delay before all copies/fragments are completed,
4593 * so this callback allows us to retire dirty space gradually, as the physical
4594 * i/os complete.
4595 */
4596 /* ARGSUSED */
4597 static void
4598 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4599 {
4600 dmu_buf_impl_t *db = arg;
4601 objset_t *os = db->db_objset;
4602 dsl_pool_t *dp = dmu_objset_pool(os);
4603 dbuf_dirty_record_t *dr;
4604 int delta = 0;
4605
4606 dr = db->db_data_pending;
4607 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4608
4609 /*
4610 * The callback will be called io_phys_children times. Retire one
4611 * portion of our dirty space each time we are called. Any rounding
4612 * error will be cleaned up by dbuf_write_done().
4613 */
4614 delta = dr->dr_accounted / zio->io_phys_children;
4615 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4616 }
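
/*
 * Worked example of the accounting above (illustrative): with
 * dr_accounted == 10000 bytes and io_phys_children == 3, each of the
 * three calls retires 10000 / 3 == 3333 bytes, and dbuf_write_done()
 * later retires the 10000 % 3 == 1 byte remainder.
 */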
4617
4618 /* ARGSUSED */
4619 static void
4620 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4621 {
4622 dmu_buf_impl_t *db = vdb;
4623 blkptr_t *bp_orig = &zio->io_bp_orig;
4624 blkptr_t *bp = db->db_blkptr;
4625 objset_t *os = db->db_objset;
4626 dmu_tx_t *tx = os->os_synctx;
4627
4628 ASSERT0(zio->io_error);
4629 ASSERT(db->db_blkptr == bp);
4630
4631 /*
4632 * For nopwrites and rewrites we ensure that the bp matches our
4633 * original and bypass all the accounting.
4634 */
4635 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4636 ASSERT(BP_EQUAL(bp, bp_orig));
4637 } else {
4638 dsl_dataset_t *ds = os->os_dsl_dataset;
4639 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4640 dsl_dataset_block_born(ds, bp, tx);
4641 }
4642
4643 mutex_enter(&db->db_mtx);
4644
4645 DBUF_VERIFY(db);
4646
4647 dbuf_dirty_record_t *dr = db->db_data_pending;
4648 dnode_t *dn = dr->dr_dnode;
4649 ASSERT(!list_link_active(&dr->dr_dirty_node));
4650 ASSERT(dr->dr_dbuf == db);
4651 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4652 list_remove(&db->db_dirty_records, dr);
4653
4654 #ifdef ZFS_DEBUG
4655 if (db->db_blkid == DMU_SPILL_BLKID) {
4656 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4657 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4658 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4659 }
4660 #endif
4661
4662 if (db->db_level == 0) {
4663 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4664 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4665 if (db->db_state != DB_NOFILL) {
4666 if (dr->dt.dl.dr_data != db->db_buf)
4667 arc_buf_destroy(dr->dt.dl.dr_data, db);
4668 }
4669 } else {
4670 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4671 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4672 if (!BP_IS_HOLE(db->db_blkptr)) {
4673 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4674 SPA_BLKPTRSHIFT;
4675 ASSERT3U(db->db_blkid, <=,
4676 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4677 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4678 db->db.db_size);
4679 }
4680 mutex_destroy(&dr->dt.di.dr_mtx);
4681 list_destroy(&dr->dt.di.dr_children);
4682 }
4683
4684 cv_broadcast(&db->db_changed);
4685 ASSERT(db->db_dirtycnt > 0);
4686 db->db_dirtycnt -= 1;
4687 db->db_data_pending = NULL;
4688 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4689
4690 /*
4691 * If we didn't do a physical write in this ZIO and we
4692 * still ended up here, it means that the space of the
4693 * dbuf that we just released (and undirtied) above hasn't
4694 * been marked as undirtied in the pool's accounting.
4695 *
4696 * Thus, we undirty that space in the pool's view of the
4697 * world here. For physical writes this type of update
4698 * happens in dbuf_write_physdone().
4699 *
4700 * If we did a physical write, cleanup any rounding errors
4701 * that came up due to writing multiple copies of a block
4702 * on disk [see dbuf_write_physdone()].
4703 */
4704 if (zio->io_phys_children == 0) {
4705 dsl_pool_undirty_space(dmu_objset_pool(os),
4706 dr->dr_accounted, zio->io_txg);
4707 } else {
4708 dsl_pool_undirty_space(dmu_objset_pool(os),
4709 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4710 }
4711
4712 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4713 }
4714
4715 static void
4716 dbuf_write_nofill_ready(zio_t *zio)
4717 {
4718 dbuf_write_ready(zio, NULL, zio->io_private);
4719 }
4720
4721 static void
4722 dbuf_write_nofill_done(zio_t *zio)
4723 {
4724 dbuf_write_done(zio, NULL, zio->io_private);
4725 }
4726
4727 static void
4728 dbuf_write_override_ready(zio_t *zio)
4729 {
4730 dbuf_dirty_record_t *dr = zio->io_private;
4731 dmu_buf_impl_t *db = dr->dr_dbuf;
4732
4733 dbuf_write_ready(zio, NULL, db);
4734 }
4735
4736 static void
4737 dbuf_write_override_done(zio_t *zio)
4738 {
4739 dbuf_dirty_record_t *dr = zio->io_private;
4740 dmu_buf_impl_t *db = dr->dr_dbuf;
4741 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4742
4743 mutex_enter(&db->db_mtx);
4744 if (!BP_EQUAL(zio->io_bp, obp)) {
4745 if (!BP_IS_HOLE(obp))
4746 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4747 arc_release(dr->dt.dl.dr_data, db);
4748 }
4749 mutex_exit(&db->db_mtx);
4750
4751 dbuf_write_done(zio, NULL, db);
4752
4753 if (zio->io_abd != NULL)
4754 abd_free(zio->io_abd);
4755 }
4756
4757 typedef struct dbuf_remap_impl_callback_arg {
4758 objset_t *drica_os;
4759 uint64_t drica_blk_birth;
4760 dmu_tx_t *drica_tx;
4761 } dbuf_remap_impl_callback_arg_t;
4762
4763 static void
4764 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4765 void *arg)
4766 {
4767 dbuf_remap_impl_callback_arg_t *drica = arg;
4768 objset_t *os = drica->drica_os;
4769 spa_t *spa = dmu_objset_spa(os);
4770 dmu_tx_t *tx = drica->drica_tx;
4771
4772 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4773
4774 if (os == spa_meta_objset(spa)) {
4775 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4776 } else {
4777 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4778 size, drica->drica_blk_birth, tx);
4779 }
4780 }
4781
4782 static void
4783 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4784 {
4785 blkptr_t bp_copy = *bp;
4786 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4787 dbuf_remap_impl_callback_arg_t drica;
4788
4789 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4790
4791 drica.drica_os = dn->dn_objset;
4792 drica.drica_blk_birth = bp->blk_birth;
4793 drica.drica_tx = tx;
4794 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4795 &drica)) {
4796 /*
4797 * If the blkptr being remapped is tracked by a livelist,
4798 * then we need to make sure the livelist reflects the update.
4799 * First, cancel out the old blkptr by appending a 'FREE'
4800 * entry. Next, add an 'ALLOC' to track the new version. This
4801 * way we avoid trying to free an inaccurate blkptr at delete.
4802 * Note that embedded blkptrs are not tracked in livelists.
4803 */
4804 if (dn->dn_objset != spa_meta_objset(spa)) {
4805 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4806 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4807 bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4808 ASSERT(!BP_IS_EMBEDDED(bp));
4809 ASSERT(dsl_dir_is_clone(ds->ds_dir));
4810 ASSERT(spa_feature_is_enabled(spa,
4811 SPA_FEATURE_LIVELIST));
4812 bplist_append(&ds->ds_dir->dd_pending_frees,
4813 bp);
4814 bplist_append(&ds->ds_dir->dd_pending_allocs,
4815 &bp_copy);
4816 }
4817 }
4818
4819 /*
4820 * The db_rwlock prevents dbuf_read_impl() from
4821 * dereferencing the BP while we are changing it. To
4822 * avoid lock contention, only grab it when we are actually
4823 * changing the BP.
4824 */
4825 if (rw != NULL)
4826 rw_enter(rw, RW_WRITER);
4827 *bp = bp_copy;
4828 if (rw != NULL)
4829 rw_exit(rw);
4830 }
4831 }
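
/*
 * Illustrative sequence for the livelist update above: remapping
 * blkptr A to concrete blkptr B appends FREE(A) to dd_pending_frees
 * and ALLOC(B) to dd_pending_allocs, so a later livelist delete frees
 * B (the version actually on disk) rather than the stale A.
 */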
4832
4833 /*
4834 * Remap any existing BP's to concrete vdevs, if possible.
4835 */
4836 static void
4837 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4838 {
4839 spa_t *spa = dmu_objset_spa(db->db_objset);
4840 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4841
4842 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4843 return;
4844
4845 if (db->db_level > 0) {
4846 blkptr_t *bp = db->db.db_data;
4847 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4848 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4849 }
4850 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4851 dnode_phys_t *dnp = db->db.db_data;
4852 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4853 DMU_OT_DNODE);
4854 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4855 i += dnp[i].dn_extra_slots + 1) {
4856 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4857 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4858 &dn->dn_dbuf->db_rwlock);
4859 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4860 tx);
4861 }
4862 }
4863 }
4864 }
4865
4867 /* Issue I/O to commit a dirty buffer to disk. */
4868 static void
4869 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
4870 {
4871 dmu_buf_impl_t *db = dr->dr_dbuf;
4872 dnode_t *dn = dr->dr_dnode;
4873 objset_t *os;
4874 dmu_buf_impl_t *parent = db->db_parent;
4875 uint64_t txg = tx->tx_txg;
4876 zbookmark_phys_t zb;
4877 zio_prop_t zp;
4878 zio_t *pio; /* parent I/O */
4879 int wp_flag = 0;
4880
4881 ASSERT(dmu_tx_is_syncing(tx));
4882
4883 os = dn->dn_objset;
4884
4885 if (db->db_state != DB_NOFILL) {
4886 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
4887 /*
4888 * Private object buffers are released here rather
4889 * than in dbuf_dirty() since they are only modified
4890 * in the syncing context and we don't want the
4891 * overhead of making multiple copies of the data.
4892 */
4893 if (BP_IS_HOLE(db->db_blkptr)) {
4894 arc_buf_thaw(data);
4895 } else {
4896 dbuf_release_bp(db);
4897 }
4898 dbuf_remap(dn, db, tx);
4899 }
4900 }
4901
4902 if (parent != dn->dn_dbuf) {
4903 /* Our parent is an indirect block. */
4904 /* We have a dirty parent that has been scheduled for write. */
4905 ASSERT(parent && parent->db_data_pending);
4906 /* Our parent's buffer is one level closer to the dnode. */
4907 ASSERT(db->db_level == parent->db_level-1);
4908 /*
4909 * We're about to modify our parent's db_data by modifying
4910 * our block pointer, so the parent must be released.
4911 */
4912 ASSERT(arc_released(parent->db_buf));
4913 pio = parent->db_data_pending->dr_zio;
4914 } else {
4915 /* Our parent is the dnode itself. */
4916 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
4917 db->db_blkid != DMU_SPILL_BLKID) ||
4918 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
4919 if (db->db_blkid != DMU_SPILL_BLKID)
4920 ASSERT3P(db->db_blkptr, ==,
4921 &dn->dn_phys->dn_blkptr[db->db_blkid]);
4922 pio = dn->dn_zio;
4923 }
4924
4925 ASSERT(db->db_level == 0 || data == db->db_buf);
4926 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
4927 ASSERT(pio);
4928
4929 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
4930 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
4931 db->db.db_object, db->db_level, db->db_blkid);
4932
4933 if (db->db_blkid == DMU_SPILL_BLKID)
4934 wp_flag = WP_SPILL;
4935 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
4936
4937 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
4938
4939 /*
4940 * We copy the blkptr now (rather than when we instantiate the dirty
4941 * record), because its value can change between open context and
4942 * syncing context. We do not need to hold dn_struct_rwlock to read
4943 * db_blkptr because we are in syncing context.
4944 */
4945 dr->dr_bp_copy = *db->db_blkptr;
4946
4947 if (db->db_level == 0 &&
4948 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
4949 /*
4950 * The BP for this block has been provided by open context
4951 * (by dmu_sync() or dmu_buf_write_embedded()).
4952 */
4953 abd_t *contents = (data != NULL) ?
4954 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
4955
4956 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
4957 contents, db->db.db_size, db->db.db_size, &zp,
4958 dbuf_write_override_ready, NULL, NULL,
4959 dbuf_write_override_done,
4960 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
4961 mutex_enter(&db->db_mtx);
4962 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
4963 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
4964 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
4965 mutex_exit(&db->db_mtx);
4966 } else if (db->db_state == DB_NOFILL) {
4967 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
4968 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
4969 dr->dr_zio = zio_write(pio, os->os_spa, txg,
4970 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
4971 dbuf_write_nofill_ready, NULL, NULL,
4972 dbuf_write_nofill_done, db,
4973 ZIO_PRIORITY_ASYNC_WRITE,
4974 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
4975 } else {
4976 ASSERT(arc_released(data));
4977
4978 /*
4979 * For indirect blocks, we want to setup the children
4980 * ready callback so that we can properly handle an indirect
4981 * block that only contains holes.
4982 */
4983 arc_write_done_func_t *children_ready_cb = NULL;
4984 if (db->db_level != 0)
4985 children_ready_cb = dbuf_write_children_ready;
4986
4987 dr->dr_zio = arc_write(pio, os->os_spa, txg,
4988 &dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
4989 &zp, dbuf_write_ready,
4990 children_ready_cb, dbuf_write_physdone,
4991 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
4992 ZIO_FLAG_MUSTSUCCEED, &zb);
4993 }
4994 }
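
/*
 * Summary of the three write paths above: overridden level-0 blocks
 * (from dmu_sync() or dmu_buf_write_embedded()) reuse the BP provided
 * by open context via zio_write_override(); DB_NOFILL buffers issue a
 * data-less zio_write(); everything else goes through arc_write() so
 * the ARC buffer stays coherent with the block on disk.
 */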
4995
4996 EXPORT_SYMBOL(dbuf_find);
4997 EXPORT_SYMBOL(dbuf_is_metadata);
4998 EXPORT_SYMBOL(dbuf_destroy);
4999 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5000 EXPORT_SYMBOL(dbuf_whichblock);
5001 EXPORT_SYMBOL(dbuf_read);
5002 EXPORT_SYMBOL(dbuf_unoverride);
5003 EXPORT_SYMBOL(dbuf_free_range);
5004 EXPORT_SYMBOL(dbuf_new_size);
5005 EXPORT_SYMBOL(dbuf_release_bp);
5006 EXPORT_SYMBOL(dbuf_dirty);
5007 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5008 EXPORT_SYMBOL(dmu_buf_will_dirty);
5009 EXPORT_SYMBOL(dmu_buf_is_dirty);
5010 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5011 EXPORT_SYMBOL(dmu_buf_will_fill);
5012 EXPORT_SYMBOL(dmu_buf_fill_done);
5013 EXPORT_SYMBOL(dmu_buf_rele);
5014 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5015 EXPORT_SYMBOL(dbuf_prefetch);
5016 EXPORT_SYMBOL(dbuf_hold_impl);
5017 EXPORT_SYMBOL(dbuf_hold);
5018 EXPORT_SYMBOL(dbuf_hold_level);
5019 EXPORT_SYMBOL(dbuf_create_bonus);
5020 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5021 EXPORT_SYMBOL(dbuf_rm_spill);
5022 EXPORT_SYMBOL(dbuf_add_ref);
5023 EXPORT_SYMBOL(dbuf_rele);
5024 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5025 EXPORT_SYMBOL(dbuf_refcount);
5026 EXPORT_SYMBOL(dbuf_sync_list);
5027 EXPORT_SYMBOL(dmu_buf_set_user);
5028 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5029 EXPORT_SYMBOL(dmu_buf_get_user);
5030 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5031
5032 /* BEGIN CSTYLED */
5033 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, ULONG, ZMOD_RW,
5034 "Maximum size in bytes of the dbuf cache.");
5035
5036 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5037 "Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
5038 "directly.");
5039
5040 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5041 "Percentage below dbuf_cache_max_bytes when the evict thread stops "
5042 "evicting dbufs.");
5043
5044 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, ULONG, ZMOD_RW,
5045 "Maximum size in bytes of the dbuf metadata cache.");
5046
5047 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, INT, ZMOD_RW,
5048 "Set the size of the dbuf cache to a log2 fraction of arc size.");
5049
5050 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, INT, ZMOD_RW,
5051 "Set the size of the dbuf metadata cache to a log2 fraction of arc "
5052 "size.");
5053 /* END CSTYLED */