mirror_zfs.git: module/zfs/dbuf.c (blob c7f76e8d96f873ae4e210b3c7c36c72946e57469)
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or https://opensource.org/licenses/CDDL-1.0.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2012, 2020 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
27 * Copyright (c) 2019, Klara Inc.
28 * Copyright (c) 2019, Allan Jude
29 * Copyright (c) 2021, 2022 by Pawel Jakub Dawidek
30 */
31
32 #include <sys/zfs_context.h>
33 #include <sys/arc.h>
34 #include <sys/dmu.h>
35 #include <sys/dmu_send.h>
36 #include <sys/dmu_impl.h>
37 #include <sys/dbuf.h>
38 #include <sys/dmu_objset.h>
39 #include <sys/dsl_dataset.h>
40 #include <sys/dsl_dir.h>
41 #include <sys/dmu_tx.h>
42 #include <sys/spa.h>
43 #include <sys/zio.h>
44 #include <sys/dmu_zfetch.h>
45 #include <sys/sa.h>
46 #include <sys/sa_impl.h>
47 #include <sys/zfeature.h>
48 #include <sys/blkptr.h>
49 #include <sys/range_tree.h>
50 #include <sys/trace_zfs.h>
51 #include <sys/callb.h>
52 #include <sys/abd.h>
53 #include <sys/brt.h>
54 #include <sys/vdev.h>
55 #include <cityhash.h>
56 #include <sys/spa_impl.h>
57 #include <sys/wmsum.h>
58 #include <sys/vdev_impl.h>
59
60 static kstat_t *dbuf_ksp;
61
62 typedef struct dbuf_stats {
63 /*
64 * Various statistics about the size of the dbuf cache.
65 */
66 kstat_named_t cache_count;
67 kstat_named_t cache_size_bytes;
68 kstat_named_t cache_size_bytes_max;
69 /*
70 * Statistics regarding the bounds on the dbuf cache size.
71 */
72 kstat_named_t cache_target_bytes;
73 kstat_named_t cache_lowater_bytes;
74 kstat_named_t cache_hiwater_bytes;
75 /*
76 * Total number of dbuf cache evictions that have occurred.
77 */
78 kstat_named_t cache_total_evicts;
79 /*
80 * The distribution of dbuf levels in the dbuf cache and
81 * the total size of all dbufs at each level.
82 */
83 kstat_named_t cache_levels[DN_MAX_LEVELS];
84 kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
85 /*
86 * Statistics about the dbuf hash table.
87 */
88 kstat_named_t hash_hits;
89 kstat_named_t hash_misses;
90 kstat_named_t hash_collisions;
91 kstat_named_t hash_elements;
92 kstat_named_t hash_elements_max;
93 /*
94 * Number of sublists containing more than one dbuf in the dbuf
95 * hash table. Keep track of the longest hash chain.
96 */
97 kstat_named_t hash_chains;
98 kstat_named_t hash_chain_max;
99 /*
100 * Number of times a dbuf_create() discovers that a dbuf was
101 * already created and in the dbuf hash table.
102 */
103 kstat_named_t hash_insert_race;
104 /*
105 * Number of entries in the hash table dbuf and mutex arrays.
106 */
107 kstat_named_t hash_table_count;
108 kstat_named_t hash_mutex_count;
109 /*
110 * Statistics about the size of the metadata dbuf cache.
111 */
112 kstat_named_t metadata_cache_count;
113 kstat_named_t metadata_cache_size_bytes;
114 kstat_named_t metadata_cache_size_bytes_max;
115 /*
116 * For diagnostic purposes, this is incremented whenever we can't add
117 * something to the metadata cache because it's full, and instead put
118 * the data in the regular dbuf cache.
119 */
120 kstat_named_t metadata_cache_overflow;
121 } dbuf_stats_t;
122
123 dbuf_stats_t dbuf_stats = {
124 { "cache_count", KSTAT_DATA_UINT64 },
125 { "cache_size_bytes", KSTAT_DATA_UINT64 },
126 { "cache_size_bytes_max", KSTAT_DATA_UINT64 },
127 { "cache_target_bytes", KSTAT_DATA_UINT64 },
128 { "cache_lowater_bytes", KSTAT_DATA_UINT64 },
129 { "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
130 { "cache_total_evicts", KSTAT_DATA_UINT64 },
131 { { "cache_levels_N", KSTAT_DATA_UINT64 } },
132 { { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
133 { "hash_hits", KSTAT_DATA_UINT64 },
134 { "hash_misses", KSTAT_DATA_UINT64 },
135 { "hash_collisions", KSTAT_DATA_UINT64 },
136 { "hash_elements", KSTAT_DATA_UINT64 },
137 { "hash_elements_max", KSTAT_DATA_UINT64 },
138 { "hash_chains", KSTAT_DATA_UINT64 },
139 { "hash_chain_max", KSTAT_DATA_UINT64 },
140 { "hash_insert_race", KSTAT_DATA_UINT64 },
141 { "hash_table_count", KSTAT_DATA_UINT64 },
142 { "hash_mutex_count", KSTAT_DATA_UINT64 },
143 { "metadata_cache_count", KSTAT_DATA_UINT64 },
144 { "metadata_cache_size_bytes", KSTAT_DATA_UINT64 },
145 { "metadata_cache_size_bytes_max", KSTAT_DATA_UINT64 },
146 { "metadata_cache_overflow", KSTAT_DATA_UINT64 }
147 };
148
149 struct {
150 wmsum_t cache_count;
151 wmsum_t cache_total_evicts;
152 wmsum_t cache_levels[DN_MAX_LEVELS];
153 wmsum_t cache_levels_bytes[DN_MAX_LEVELS];
154 wmsum_t hash_hits;
155 wmsum_t hash_misses;
156 wmsum_t hash_collisions;
157 wmsum_t hash_chains;
158 wmsum_t hash_insert_race;
159 wmsum_t metadata_cache_count;
160 wmsum_t metadata_cache_overflow;
161 } dbuf_sums;
162
163 #define DBUF_STAT_INCR(stat, val) \
164 wmsum_add(&dbuf_sums.stat, val);
165 #define DBUF_STAT_DECR(stat, val) \
166 DBUF_STAT_INCR(stat, -(val));
167 #define DBUF_STAT_BUMP(stat) \
168 DBUF_STAT_INCR(stat, 1);
169 #define DBUF_STAT_BUMPDOWN(stat) \
170 DBUF_STAT_INCR(stat, -1);
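/*
 * Lock-free maximum tracking for a kstat: retry the compare-and-swap until
 * the stored value is at least v, or another thread has already raced in a
 * larger value.
 */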
171 #define DBUF_STAT_MAX(stat, v) { \
172 uint64_t _m; \
173 while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
174 (_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
175 continue; \
176 }
177
178 static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
179 static void dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr);
180 static int dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags);
181
182 /*
183 * Global data structures and functions for the dbuf cache.
184 */
185 static kmem_cache_t *dbuf_kmem_cache;
186 static taskq_t *dbu_evict_taskq;
187
188 static kthread_t *dbuf_cache_evict_thread;
189 static kmutex_t dbuf_evict_lock;
190 static kcondvar_t dbuf_evict_cv;
191 static boolean_t dbuf_evict_thread_exit;
192
193 /*
194 * There are two dbuf caches; each dbuf can only be in one of them at a time.
195 *
196 * 1. Cache of metadata dbufs, to help make read-heavy administrative commands
197 * from /sbin/zfs run faster. The "metadata cache" specifically stores dbufs
198 * that represent the metadata that describes filesystems/snapshots/
199 * bookmarks/properties/etc. We only evict from this cache when we export a
200 * pool, to short-circuit as much I/O as possible for all administrative
201 * commands that need the metadata. There is no eviction policy for this
202 * cache, because we try to only include types in it which would occupy a
203 * very small amount of space per object but create a large impact on the
204 * performance of these commands. Instead, after it reaches a maximum size
205 * (which should only happen on very small memory systems with a very large
206 * number of filesystem objects), we stop taking new dbufs into the
207 * metadata cache, instead putting them in the normal dbuf cache.
208 *
209 * 2. LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
210 * are not currently held but have been recently released. These dbufs
211 * are not eligible for arc eviction until they are aged out of the cache.
212 * Dbufs that are aged out of the cache will be immediately destroyed and
213 * become eligible for arc eviction.
214 *
215 * Dbufs are added to these caches once the last hold is released. If a dbuf is
216 * later accessed and still exists in the dbuf cache, then it will be removed
217 * from the cache and later re-added to the head of the cache.
218 *
219 * If a given dbuf meets the requirements for the metadata cache, it will go
220 * there, otherwise it will be considered for the generic LRU dbuf cache. The
221 * caches and the refcounts tracking their sizes are stored in an array indexed
222 * by those caches' matching enum values (from dbuf_cached_state_t).
223 */
224 typedef struct dbuf_cache {
225 multilist_t cache;
226 zfs_refcount_t size ____cacheline_aligned;
227 } dbuf_cache_t;
228 dbuf_cache_t dbuf_caches[DB_CACHE_MAX];
229
230 /* Size limits for the caches */
231 static uint64_t dbuf_cache_max_bytes = UINT64_MAX;
232 static uint64_t dbuf_metadata_cache_max_bytes = UINT64_MAX;
233
 234 /* Set the default sizes of the caches to a log2 fraction of arc size (1/32 and 1/64 by default) */
235 static uint_t dbuf_cache_shift = 5;
236 static uint_t dbuf_metadata_cache_shift = 6;
237
238 /* Set the dbuf hash mutex count as log2 shift (dynamic by default) */
239 static uint_t dbuf_mutex_cache_shift = 0;
240
241 static unsigned long dbuf_cache_target_bytes(void);
242 static unsigned long dbuf_metadata_cache_target_bytes(void);
243
244 /*
245 * The LRU dbuf cache uses a three-stage eviction policy:
246 * - A low water marker designates when the dbuf eviction thread
247 * should stop evicting from the dbuf cache.
248 * - When we reach the maximum size (aka mid water mark), we
249 * signal the eviction thread to run.
250 * - The high water mark indicates when the eviction thread
251 * is unable to keep up with the incoming load and eviction must
252 * happen in the context of the calling thread.
253 *
254 * The dbuf cache:
255 * (max size)
256 * low water mid water hi water
257 * +----------------------------------------+----------+----------+
258 * | | | |
259 * | | | |
260 * | | | |
261 * | | | |
262 * +----------------------------------------+----------+----------+
263 * stop signal evict
264 * evicting eviction directly
265 * thread
266 *
267 * The high and low water marks indicate the operating range for the eviction
268 * thread. The low water mark is, by default, 90% of the total size of the
269 * cache and the high water mark is at 110% (both of these percentages can be
270 * changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
271 * respectively). The eviction thread will try to ensure that the cache remains
272 * within this range by waking up every second and checking if the cache is
273 * above the low water mark. The thread can also be woken up by callers adding
 274  * elements into the cache if the cache is larger than the mid water (i.e. max
275 * cache size). Once the eviction thread is woken up and eviction is required,
276 * it will continue evicting buffers until it's able to reduce the cache size
277 * to the low water mark. If the cache size continues to grow and hits the high
278 * water mark, then callers adding elements to the cache will begin to evict
279 * directly from the cache until the cache is no longer above the high water
280 * mark.
281 */
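/*
 * Illustrative numbers only: with a 100MB target and the default 10%
 * margins, the eviction thread stops evicting at 90MB, is signalled once
 * the cache grows past 100MB, and callers evict directly once the cache
 * exceeds 110MB.
 */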
282
283 /*
284 * The percentage above and below the maximum cache size.
285 */
286 static uint_t dbuf_cache_hiwater_pct = 10;
287 static uint_t dbuf_cache_lowater_pct = 10;
288
289 static int
290 dbuf_cons(void *vdb, void *unused, int kmflag)
291 {
292 (void) unused, (void) kmflag;
293 dmu_buf_impl_t *db = vdb;
294 memset(db, 0, sizeof (dmu_buf_impl_t));
295
296 mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
297 rw_init(&db->db_rwlock, NULL, RW_DEFAULT, NULL);
298 cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
299 multilist_link_init(&db->db_cache_link);
300 zfs_refcount_create(&db->db_holds);
301
302 return (0);
303 }
304
305 static void
306 dbuf_dest(void *vdb, void *unused)
307 {
308 (void) unused;
309 dmu_buf_impl_t *db = vdb;
310 mutex_destroy(&db->db_mtx);
311 rw_destroy(&db->db_rwlock);
312 cv_destroy(&db->db_changed);
313 ASSERT(!multilist_link_active(&db->db_cache_link));
314 zfs_refcount_destroy(&db->db_holds);
315 }
316
317 /*
318 * dbuf hash table routines
319 */
320 static dbuf_hash_table_t dbuf_hash_table;
321
322 /*
323 * We use Cityhash for this. It's fast, and has good hash properties without
324 * requiring any large static buffers.
325 */
326 static uint64_t
327 dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
328 {
329 return (cityhash4((uintptr_t)os, obj, (uint64_t)lvl, blkid));
330 }
331
332 #define DTRACE_SET_STATE(db, why) \
333 DTRACE_PROBE2(dbuf__state_change, dmu_buf_impl_t *, db, \
334 const char *, why)
335
336 #define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
337 ((dbuf)->db.db_object == (obj) && \
338 (dbuf)->db_objset == (os) && \
339 (dbuf)->db_level == (level) && \
340 (dbuf)->db_blkid == (blkid))
341
342 dmu_buf_impl_t *
343 dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid,
344 uint64_t *hash_out)
345 {
346 dbuf_hash_table_t *h = &dbuf_hash_table;
347 uint64_t hv;
348 uint64_t idx;
349 dmu_buf_impl_t *db;
350
351 hv = dbuf_hash(os, obj, level, blkid);
352 idx = hv & h->hash_table_mask;
353
354 mutex_enter(DBUF_HASH_MUTEX(h, idx));
355 for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
356 if (DBUF_EQUAL(db, os, obj, level, blkid)) {
357 mutex_enter(&db->db_mtx);
358 if (db->db_state != DB_EVICTING) {
359 mutex_exit(DBUF_HASH_MUTEX(h, idx));
360 return (db);
361 }
362 mutex_exit(&db->db_mtx);
363 }
364 }
365 mutex_exit(DBUF_HASH_MUTEX(h, idx));
366 if (hash_out != NULL)
367 *hash_out = hv;
368 return (NULL);
369 }
370
371 static dmu_buf_impl_t *
372 dbuf_find_bonus(objset_t *os, uint64_t object)
373 {
374 dnode_t *dn;
375 dmu_buf_impl_t *db = NULL;
376
377 if (dnode_hold(os, object, FTAG, &dn) == 0) {
378 rw_enter(&dn->dn_struct_rwlock, RW_READER);
379 if (dn->dn_bonus != NULL) {
380 db = dn->dn_bonus;
381 mutex_enter(&db->db_mtx);
382 }
383 rw_exit(&dn->dn_struct_rwlock);
384 dnode_rele(dn, FTAG);
385 }
386 return (db);
387 }
388
389 /*
390 * Insert an entry into the hash table. If there is already an element
391 * equal to elem in the hash table, then the already existing element
392 * will be returned and the new element will not be inserted.
393 * Otherwise returns NULL.
394 */
395 static dmu_buf_impl_t *
396 dbuf_hash_insert(dmu_buf_impl_t *db)
397 {
398 dbuf_hash_table_t *h = &dbuf_hash_table;
399 objset_t *os = db->db_objset;
400 uint64_t obj = db->db.db_object;
401 int level = db->db_level;
402 uint64_t blkid, idx;
403 dmu_buf_impl_t *dbf;
404 uint32_t i;
405
406 blkid = db->db_blkid;
407 ASSERT3U(dbuf_hash(os, obj, level, blkid), ==, db->db_hash);
408 idx = db->db_hash & h->hash_table_mask;
409
410 mutex_enter(DBUF_HASH_MUTEX(h, idx));
411 for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
412 dbf = dbf->db_hash_next, i++) {
413 if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
414 mutex_enter(&dbf->db_mtx);
415 if (dbf->db_state != DB_EVICTING) {
416 mutex_exit(DBUF_HASH_MUTEX(h, idx));
417 return (dbf);
418 }
419 mutex_exit(&dbf->db_mtx);
420 }
421 }
422
423 if (i > 0) {
424 DBUF_STAT_BUMP(hash_collisions);
425 if (i == 1)
426 DBUF_STAT_BUMP(hash_chains);
427
428 DBUF_STAT_MAX(hash_chain_max, i);
429 }
430
431 mutex_enter(&db->db_mtx);
432 db->db_hash_next = h->hash_table[idx];
433 h->hash_table[idx] = db;
434 mutex_exit(DBUF_HASH_MUTEX(h, idx));
435 uint64_t he = atomic_inc_64_nv(&dbuf_stats.hash_elements.value.ui64);
436 DBUF_STAT_MAX(hash_elements_max, he);
437
438 return (NULL);
439 }
440
441 /*
442 * This returns whether this dbuf should be stored in the metadata cache, which
443 * is based on whether it's from one of the dnode types that store data related
444 * to traversing dataset hierarchies.
445 */
446 static boolean_t
447 dbuf_include_in_metadata_cache(dmu_buf_impl_t *db)
448 {
449 DB_DNODE_ENTER(db);
450 dmu_object_type_t type = DB_DNODE(db)->dn_type;
451 DB_DNODE_EXIT(db);
452
453 /* Check if this dbuf is one of the types we care about */
454 if (DMU_OT_IS_METADATA_CACHED(type)) {
455 /* If we hit this, then we set something up wrong in dmu_ot */
456 ASSERT(DMU_OT_IS_METADATA(type));
457
458 /*
459 * Sanity check for small-memory systems: don't allocate too
460 * much memory for this purpose.
461 */
462 if (zfs_refcount_count(
463 &dbuf_caches[DB_DBUF_METADATA_CACHE].size) >
464 dbuf_metadata_cache_target_bytes()) {
465 DBUF_STAT_BUMP(metadata_cache_overflow);
466 return (B_FALSE);
467 }
468
469 return (B_TRUE);
470 }
471
472 return (B_FALSE);
473 }
474
475 /*
476 * Remove an entry from the hash table. It must be in the EVICTING state.
477 */
478 static void
479 dbuf_hash_remove(dmu_buf_impl_t *db)
480 {
481 dbuf_hash_table_t *h = &dbuf_hash_table;
482 uint64_t idx;
483 dmu_buf_impl_t *dbf, **dbp;
484
485 ASSERT3U(dbuf_hash(db->db_objset, db->db.db_object, db->db_level,
486 db->db_blkid), ==, db->db_hash);
487 idx = db->db_hash & h->hash_table_mask;
488
489 /*
490 * We mustn't hold db_mtx to maintain lock ordering:
491 * DBUF_HASH_MUTEX > db_mtx.
492 */
493 ASSERT(zfs_refcount_is_zero(&db->db_holds));
494 ASSERT(db->db_state == DB_EVICTING);
495 ASSERT(!MUTEX_HELD(&db->db_mtx));
496
497 mutex_enter(DBUF_HASH_MUTEX(h, idx));
498 dbp = &h->hash_table[idx];
499 while ((dbf = *dbp) != db) {
500 dbp = &dbf->db_hash_next;
501 ASSERT(dbf != NULL);
502 }
503 *dbp = db->db_hash_next;
504 db->db_hash_next = NULL;
505 if (h->hash_table[idx] &&
506 h->hash_table[idx]->db_hash_next == NULL)
507 DBUF_STAT_BUMPDOWN(hash_chains);
508 mutex_exit(DBUF_HASH_MUTEX(h, idx));
509 atomic_dec_64(&dbuf_stats.hash_elements.value.ui64);
510 }
511
512 typedef enum {
513 DBVU_EVICTING,
514 DBVU_NOT_EVICTING
515 } dbvu_verify_type_t;
516
517 static void
518 dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
519 {
520 #ifdef ZFS_DEBUG
521 int64_t holds;
522
523 if (db->db_user == NULL)
524 return;
525
526 /* Only data blocks support the attachment of user data. */
527 ASSERT(db->db_level == 0);
528
529 /* Clients must resolve a dbuf before attaching user data. */
530 ASSERT(db->db.db_data != NULL);
531 ASSERT3U(db->db_state, ==, DB_CACHED);
532
533 holds = zfs_refcount_count(&db->db_holds);
534 if (verify_type == DBVU_EVICTING) {
535 /*
536 * Immediate eviction occurs when holds == dirtycnt.
537 * For normal eviction buffers, holds is zero on
538 * eviction, except when dbuf_fix_old_data() calls
539 * dbuf_clear_data(). However, the hold count can grow
540 * during eviction even though db_mtx is held (see
541 * dmu_bonus_hold() for an example), so we can only
542 * test the generic invariant that holds >= dirtycnt.
543 */
544 ASSERT3U(holds, >=, db->db_dirtycnt);
545 } else {
546 if (db->db_user_immediate_evict == TRUE)
547 ASSERT3U(holds, >=, db->db_dirtycnt);
548 else
549 ASSERT3U(holds, >, 0);
550 }
551 #endif
552 }
553
554 static void
555 dbuf_evict_user(dmu_buf_impl_t *db)
556 {
557 dmu_buf_user_t *dbu = db->db_user;
558
559 ASSERT(MUTEX_HELD(&db->db_mtx));
560
561 if (dbu == NULL)
562 return;
563
564 dbuf_verify_user(db, DBVU_EVICTING);
565 db->db_user = NULL;
566
567 #ifdef ZFS_DEBUG
568 if (dbu->dbu_clear_on_evict_dbufp != NULL)
569 *dbu->dbu_clear_on_evict_dbufp = NULL;
570 #endif
571
572 /*
573 * There are two eviction callbacks - one that we call synchronously
574 * and one that we invoke via a taskq. The async one is useful for
575 * avoiding lock order reversals and limiting stack depth.
576 *
577 * Note that if we have a sync callback but no async callback,
578 * it's likely that the sync callback will free the structure
579 * containing the dbu. In that case we need to take care to not
580 * dereference dbu after calling the sync evict func.
581 */
582 boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
583
584 if (dbu->dbu_evict_func_sync != NULL)
585 dbu->dbu_evict_func_sync(dbu);
586
587 if (has_async) {
588 taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
589 dbu, 0, &dbu->dbu_tqent);
590 }
591 }
592
593 boolean_t
594 dbuf_is_metadata(dmu_buf_impl_t *db)
595 {
596 /*
597 * Consider indirect blocks and spill blocks to be meta data.
598 */
599 if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
600 return (B_TRUE);
601 } else {
602 boolean_t is_metadata;
603
604 DB_DNODE_ENTER(db);
605 is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
606 DB_DNODE_EXIT(db);
607
608 return (is_metadata);
609 }
610 }
611
612 /*
613 * We want to exclude buffers that are on a special allocation class from
614 * L2ARC.
615 */
616 boolean_t
617 dbuf_is_l2cacheable(dmu_buf_impl_t *db)
618 {
619 if (db->db_objset->os_secondary_cache == ZFS_CACHE_ALL ||
620 (db->db_objset->os_secondary_cache ==
621 ZFS_CACHE_METADATA && dbuf_is_metadata(db))) {
622 if (l2arc_exclude_special == 0)
623 return (B_TRUE);
624
625 blkptr_t *bp = db->db_blkptr;
626 if (bp == NULL || BP_IS_HOLE(bp))
627 return (B_FALSE);
628 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
629 vdev_t *rvd = db->db_objset->os_spa->spa_root_vdev;
630 vdev_t *vd = NULL;
631
632 if (vdev < rvd->vdev_children)
633 vd = rvd->vdev_child[vdev];
634
635 if (vd == NULL)
636 return (B_TRUE);
637
638 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
639 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
640 return (B_TRUE);
641 }
642 return (B_FALSE);
643 }
644
645 static inline boolean_t
646 dnode_level_is_l2cacheable(blkptr_t *bp, dnode_t *dn, int64_t level)
647 {
648 if (dn->dn_objset->os_secondary_cache == ZFS_CACHE_ALL ||
649 (dn->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA &&
650 (level > 0 ||
651 DMU_OT_IS_METADATA(dn->dn_handle->dnh_dnode->dn_type)))) {
652 if (l2arc_exclude_special == 0)
653 return (B_TRUE);
654
655 if (bp == NULL || BP_IS_HOLE(bp))
656 return (B_FALSE);
657 uint64_t vdev = DVA_GET_VDEV(bp->blk_dva);
658 vdev_t *rvd = dn->dn_objset->os_spa->spa_root_vdev;
659 vdev_t *vd = NULL;
660
661 if (vdev < rvd->vdev_children)
662 vd = rvd->vdev_child[vdev];
663
664 if (vd == NULL)
665 return (B_TRUE);
666
667 if (vd->vdev_alloc_bias != VDEV_BIAS_SPECIAL &&
668 vd->vdev_alloc_bias != VDEV_BIAS_DEDUP)
669 return (B_TRUE);
670 }
671 return (B_FALSE);
672 }
673
674
675 /*
676 * This function *must* return indices evenly distributed between all
677 * sublists of the multilist. This is needed due to how the dbuf eviction
678 * code is laid out; dbuf_evict_thread() assumes dbufs are evenly
679 * distributed between all sublists and uses this assumption when
680 * deciding which sublist to evict from and how much to evict from it.
681 */
682 static unsigned int
683 dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
684 {
685 dmu_buf_impl_t *db = obj;
686
687 /*
 688  * The assumption here is that the hash value for a given
 689  * dmu_buf_impl_t will remain constant throughout its lifetime
 690  * (i.e. its objset, object, level and blkid fields don't change).
691 * Thus, we don't need to store the dbuf's sublist index
692 * on insertion, as this index can be recalculated on removal.
693 *
694 * Also, the low order bits of the hash value are thought to be
695 * distributed evenly. Otherwise, in the case that the multilist
 696  * has a power of two number of sublists, each sublist's usage
697 * would not be evenly distributed. In this context full 64bit
698 * division would be a waste of time, so limit it to 32 bits.
699 */
700 return ((unsigned int)dbuf_hash(db->db_objset, db->db.db_object,
701 db->db_level, db->db_blkid) %
702 multilist_get_num_sublists(ml));
703 }
704
705 /*
706 * The target size of the dbuf cache can grow with the ARC target,
707 * unless limited by the tunable dbuf_cache_max_bytes.
708 */
709 static inline unsigned long
710 dbuf_cache_target_bytes(void)
711 {
712 return (MIN(dbuf_cache_max_bytes,
713 arc_target_bytes() >> dbuf_cache_shift));
714 }
715
716 /*
717 * The target size of the dbuf metadata cache can grow with the ARC target,
718 * unless limited by the tunable dbuf_metadata_cache_max_bytes.
719 */
720 static inline unsigned long
721 dbuf_metadata_cache_target_bytes(void)
722 {
723 return (MIN(dbuf_metadata_cache_max_bytes,
724 arc_target_bytes() >> dbuf_metadata_cache_shift));
725 }
726
727 static inline uint64_t
728 dbuf_cache_hiwater_bytes(void)
729 {
730 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
731 return (dbuf_cache_target +
732 (dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
733 }
734
735 static inline uint64_t
736 dbuf_cache_lowater_bytes(void)
737 {
738 uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
739 return (dbuf_cache_target -
740 (dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
741 }
742
743 static inline boolean_t
744 dbuf_cache_above_lowater(void)
745 {
746 return (zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size) >
747 dbuf_cache_lowater_bytes());
748 }
749
750 /*
751 * Evict the oldest eligible dbuf from the dbuf cache.
752 */
753 static void
754 dbuf_evict_one(void)
755 {
756 int idx = multilist_get_random_index(&dbuf_caches[DB_DBUF_CACHE].cache);
757 multilist_sublist_t *mls = multilist_sublist_lock(
758 &dbuf_caches[DB_DBUF_CACHE].cache, idx);
759
760 ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
761
762 dmu_buf_impl_t *db = multilist_sublist_tail(mls);
763 while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
764 db = multilist_sublist_prev(mls, db);
765 }
766
767 DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
768 multilist_sublist_t *, mls);
769
770 if (db != NULL) {
771 multilist_sublist_remove(mls, db);
772 multilist_sublist_unlock(mls);
773 (void) zfs_refcount_remove_many(
774 &dbuf_caches[DB_DBUF_CACHE].size, db->db.db_size, db);
775 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
776 DBUF_STAT_BUMPDOWN(cache_count);
777 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
778 db->db.db_size);
779 ASSERT3U(db->db_caching_status, ==, DB_DBUF_CACHE);
780 db->db_caching_status = DB_NO_CACHE;
781 dbuf_destroy(db);
782 DBUF_STAT_BUMP(cache_total_evicts);
783 } else {
784 multilist_sublist_unlock(mls);
785 }
786 }
787
788 /*
789 * The dbuf evict thread is responsible for aging out dbufs from the
 790  * cache. Once the cache has reached its maximum size, dbufs are removed
791 * and destroyed. The eviction thread will continue running until the size
792 * of the dbuf cache is at or below the maximum size. Once the dbuf is aged
793 * out of the cache it is destroyed and becomes eligible for arc eviction.
794 */
795 static __attribute__((noreturn)) void
796 dbuf_evict_thread(void *unused)
797 {
798 (void) unused;
799 callb_cpr_t cpr;
800
801 CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
802
803 mutex_enter(&dbuf_evict_lock);
804 while (!dbuf_evict_thread_exit) {
805 while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
806 CALLB_CPR_SAFE_BEGIN(&cpr);
807 (void) cv_timedwait_idle_hires(&dbuf_evict_cv,
808 &dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
809 CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
810 }
811 mutex_exit(&dbuf_evict_lock);
812
813 /*
814 * Keep evicting as long as we're above the low water mark
815 * for the cache. We do this without holding the locks to
816 * minimize lock contention.
817 */
818 while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
819 dbuf_evict_one();
820 }
821
822 mutex_enter(&dbuf_evict_lock);
823 }
824
825 dbuf_evict_thread_exit = B_FALSE;
826 cv_broadcast(&dbuf_evict_cv);
827 CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
828 thread_exit();
829 }
830
831 /*
832 * Wake up the dbuf eviction thread if the dbuf cache is at its max size.
833 * If the dbuf cache is at its high water mark, then evict a dbuf from the
834 * dbuf cache using the caller's context.
835 */
836 static void
837 dbuf_evict_notify(uint64_t size)
838 {
839 /*
840 * We check if we should evict without holding the dbuf_evict_lock,
841 * because it's OK to occasionally make the wrong decision here,
842 * and grabbing the lock results in massive lock contention.
843 */
844 if (size > dbuf_cache_target_bytes()) {
845 if (size > dbuf_cache_hiwater_bytes())
846 dbuf_evict_one();
847 cv_signal(&dbuf_evict_cv);
848 }
849 }
850
851 static int
852 dbuf_kstat_update(kstat_t *ksp, int rw)
853 {
854 dbuf_stats_t *ds = ksp->ks_data;
855 dbuf_hash_table_t *h = &dbuf_hash_table;
856
857 if (rw == KSTAT_WRITE)
858 return (SET_ERROR(EACCES));
859
860 ds->cache_count.value.ui64 =
861 wmsum_value(&dbuf_sums.cache_count);
862 ds->cache_size_bytes.value.ui64 =
863 zfs_refcount_count(&dbuf_caches[DB_DBUF_CACHE].size);
864 ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
865 ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
866 ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
867 ds->cache_total_evicts.value.ui64 =
868 wmsum_value(&dbuf_sums.cache_total_evicts);
869 for (int i = 0; i < DN_MAX_LEVELS; i++) {
870 ds->cache_levels[i].value.ui64 =
871 wmsum_value(&dbuf_sums.cache_levels[i]);
872 ds->cache_levels_bytes[i].value.ui64 =
873 wmsum_value(&dbuf_sums.cache_levels_bytes[i]);
874 }
875 ds->hash_hits.value.ui64 =
876 wmsum_value(&dbuf_sums.hash_hits);
877 ds->hash_misses.value.ui64 =
878 wmsum_value(&dbuf_sums.hash_misses);
879 ds->hash_collisions.value.ui64 =
880 wmsum_value(&dbuf_sums.hash_collisions);
881 ds->hash_chains.value.ui64 =
882 wmsum_value(&dbuf_sums.hash_chains);
883 ds->hash_insert_race.value.ui64 =
884 wmsum_value(&dbuf_sums.hash_insert_race);
885 ds->hash_table_count.value.ui64 = h->hash_table_mask + 1;
886 ds->hash_mutex_count.value.ui64 = h->hash_mutex_mask + 1;
887 ds->metadata_cache_count.value.ui64 =
888 wmsum_value(&dbuf_sums.metadata_cache_count);
889 ds->metadata_cache_size_bytes.value.ui64 = zfs_refcount_count(
890 &dbuf_caches[DB_DBUF_METADATA_CACHE].size);
891 ds->metadata_cache_overflow.value.ui64 =
892 wmsum_value(&dbuf_sums.metadata_cache_overflow);
893 return (0);
894 }
895
896 void
897 dbuf_init(void)
898 {
899 uint64_t hmsize, hsize = 1ULL << 16;
900 dbuf_hash_table_t *h = &dbuf_hash_table;
901
902 /*
903 * The hash table is big enough to fill one eighth of physical memory
904 * with an average block size of zfs_arc_average_blocksize (default 8K).
 905  * By default, the table will take up
 906  * totalmem * sizeof(void*) / (8 * 8K) (~128KB per GB with 8-byte pointers).
907 */
908 while (hsize * zfs_arc_average_blocksize < arc_all_memory() / 8)
909 hsize <<= 1;
910
911 h->hash_table = NULL;
912 while (h->hash_table == NULL) {
913 h->hash_table_mask = hsize - 1;
914
915 h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
916 if (h->hash_table == NULL)
917 hsize >>= 1;
918
919 ASSERT3U(hsize, >=, 1ULL << 10);
920 }
921
922 /*
923 * The hash table buckets are protected by an array of mutexes where
 924  * each mutex is responsible for protecting 128 buckets. A minimum
925 * array size of 8192 is targeted to avoid contention.
926 */
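	/*
	 * For example, a table of 2^21 buckets gets MAX(2^21 >> 7, 2^13) =
	 * 16384 mutexes; setting dbuf_mutex_cache_shift instead forces a
	 * power-of-two count of up to 2^24.
	 */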
927 if (dbuf_mutex_cache_shift == 0)
928 hmsize = MAX(hsize >> 7, 1ULL << 13);
929 else
930 hmsize = 1ULL << MIN(dbuf_mutex_cache_shift, 24);
931
932 h->hash_mutexes = NULL;
933 while (h->hash_mutexes == NULL) {
934 h->hash_mutex_mask = hmsize - 1;
935
936 h->hash_mutexes = vmem_zalloc(hmsize * sizeof (kmutex_t),
937 KM_SLEEP);
938 if (h->hash_mutexes == NULL)
939 hmsize >>= 1;
940 }
941
942 dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
943 sizeof (dmu_buf_impl_t),
944 0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
945
946 for (int i = 0; i < hmsize; i++)
947 mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
948
949 dbuf_stats_init(h);
950
951 /*
952 * All entries are queued via taskq_dispatch_ent(), so min/maxalloc
953 * configuration is not required.
954 */
955 dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
956
957 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
958 multilist_create(&dbuf_caches[dcs].cache,
959 sizeof (dmu_buf_impl_t),
960 offsetof(dmu_buf_impl_t, db_cache_link),
961 dbuf_cache_multilist_index_func);
962 zfs_refcount_create(&dbuf_caches[dcs].size);
963 }
964
965 dbuf_evict_thread_exit = B_FALSE;
966 mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
967 cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
968 dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
969 NULL, 0, &p0, TS_RUN, minclsyspri);
970
971 wmsum_init(&dbuf_sums.cache_count, 0);
972 wmsum_init(&dbuf_sums.cache_total_evicts, 0);
973 for (int i = 0; i < DN_MAX_LEVELS; i++) {
974 wmsum_init(&dbuf_sums.cache_levels[i], 0);
975 wmsum_init(&dbuf_sums.cache_levels_bytes[i], 0);
976 }
977 wmsum_init(&dbuf_sums.hash_hits, 0);
978 wmsum_init(&dbuf_sums.hash_misses, 0);
979 wmsum_init(&dbuf_sums.hash_collisions, 0);
980 wmsum_init(&dbuf_sums.hash_chains, 0);
981 wmsum_init(&dbuf_sums.hash_insert_race, 0);
982 wmsum_init(&dbuf_sums.metadata_cache_count, 0);
983 wmsum_init(&dbuf_sums.metadata_cache_overflow, 0);
984
985 dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
986 KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
987 KSTAT_FLAG_VIRTUAL);
988 if (dbuf_ksp != NULL) {
989 for (int i = 0; i < DN_MAX_LEVELS; i++) {
990 snprintf(dbuf_stats.cache_levels[i].name,
991 KSTAT_STRLEN, "cache_level_%d", i);
992 dbuf_stats.cache_levels[i].data_type =
993 KSTAT_DATA_UINT64;
994 snprintf(dbuf_stats.cache_levels_bytes[i].name,
995 KSTAT_STRLEN, "cache_level_%d_bytes", i);
996 dbuf_stats.cache_levels_bytes[i].data_type =
997 KSTAT_DATA_UINT64;
998 }
999 dbuf_ksp->ks_data = &dbuf_stats;
1000 dbuf_ksp->ks_update = dbuf_kstat_update;
1001 kstat_install(dbuf_ksp);
1002 }
1003 }
1004
1005 void
1006 dbuf_fini(void)
1007 {
1008 dbuf_hash_table_t *h = &dbuf_hash_table;
1009
1010 dbuf_stats_destroy();
1011
1012 for (int i = 0; i < (h->hash_mutex_mask + 1); i++)
1013 mutex_destroy(&h->hash_mutexes[i]);
1014
1015 vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
1016 vmem_free(h->hash_mutexes, (h->hash_mutex_mask + 1) *
1017 sizeof (kmutex_t));
1018
1019 kmem_cache_destroy(dbuf_kmem_cache);
1020 taskq_destroy(dbu_evict_taskq);
1021
1022 mutex_enter(&dbuf_evict_lock);
1023 dbuf_evict_thread_exit = B_TRUE;
1024 while (dbuf_evict_thread_exit) {
1025 cv_signal(&dbuf_evict_cv);
1026 cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
1027 }
1028 mutex_exit(&dbuf_evict_lock);
1029
1030 mutex_destroy(&dbuf_evict_lock);
1031 cv_destroy(&dbuf_evict_cv);
1032
1033 for (dbuf_cached_state_t dcs = 0; dcs < DB_CACHE_MAX; dcs++) {
1034 zfs_refcount_destroy(&dbuf_caches[dcs].size);
1035 multilist_destroy(&dbuf_caches[dcs].cache);
1036 }
1037
1038 if (dbuf_ksp != NULL) {
1039 kstat_delete(dbuf_ksp);
1040 dbuf_ksp = NULL;
1041 }
1042
1043 wmsum_fini(&dbuf_sums.cache_count);
1044 wmsum_fini(&dbuf_sums.cache_total_evicts);
1045 for (int i = 0; i < DN_MAX_LEVELS; i++) {
1046 wmsum_fini(&dbuf_sums.cache_levels[i]);
1047 wmsum_fini(&dbuf_sums.cache_levels_bytes[i]);
1048 }
1049 wmsum_fini(&dbuf_sums.hash_hits);
1050 wmsum_fini(&dbuf_sums.hash_misses);
1051 wmsum_fini(&dbuf_sums.hash_collisions);
1052 wmsum_fini(&dbuf_sums.hash_chains);
1053 wmsum_fini(&dbuf_sums.hash_insert_race);
1054 wmsum_fini(&dbuf_sums.metadata_cache_count);
1055 wmsum_fini(&dbuf_sums.metadata_cache_overflow);
1056 }
1057
1058 /*
1059 * Other stuff.
1060 */
1061
1062 #ifdef ZFS_DEBUG
1063 static void
1064 dbuf_verify(dmu_buf_impl_t *db)
1065 {
1066 dnode_t *dn;
1067 dbuf_dirty_record_t *dr;
1068 uint32_t txg_prev;
1069
1070 ASSERT(MUTEX_HELD(&db->db_mtx));
1071
1072 if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
1073 return;
1074
1075 ASSERT(db->db_objset != NULL);
1076 DB_DNODE_ENTER(db);
1077 dn = DB_DNODE(db);
1078 if (dn == NULL) {
1079 ASSERT(db->db_parent == NULL);
1080 ASSERT(db->db_blkptr == NULL);
1081 } else {
1082 ASSERT3U(db->db.db_object, ==, dn->dn_object);
1083 ASSERT3P(db->db_objset, ==, dn->dn_objset);
1084 ASSERT3U(db->db_level, <, dn->dn_nlevels);
1085 ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
1086 db->db_blkid == DMU_SPILL_BLKID ||
1087 !avl_is_empty(&dn->dn_dbufs));
1088 }
1089 if (db->db_blkid == DMU_BONUS_BLKID) {
1090 ASSERT(dn != NULL);
1091 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
1092 ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
1093 } else if (db->db_blkid == DMU_SPILL_BLKID) {
1094 ASSERT(dn != NULL);
1095 ASSERT0(db->db.db_offset);
1096 } else {
1097 ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
1098 }
1099
1100 if ((dr = list_head(&db->db_dirty_records)) != NULL) {
1101 ASSERT(dr->dr_dbuf == db);
1102 txg_prev = dr->dr_txg;
1103 for (dr = list_next(&db->db_dirty_records, dr); dr != NULL;
1104 dr = list_next(&db->db_dirty_records, dr)) {
1105 ASSERT(dr->dr_dbuf == db);
1106 ASSERT(txg_prev > dr->dr_txg);
1107 txg_prev = dr->dr_txg;
1108 }
1109 }
1110
1111 /*
1112 * We can't assert that db_size matches dn_datablksz because it
1113 * can be momentarily different when another thread is doing
1114 * dnode_set_blksz().
1115 */
1116 if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
1117 dr = db->db_data_pending;
1118 /*
1119 * It should only be modified in syncing context, so
1120 * make sure we only have one copy of the data.
1121 */
1122 ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
1123 }
1124
1125 /* verify db->db_blkptr */
1126 if (db->db_blkptr) {
1127 if (db->db_parent == dn->dn_dbuf) {
1128 /* db is pointed to by the dnode */
1129 /* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
1130 if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
1131 ASSERT(db->db_parent == NULL);
1132 else
1133 ASSERT(db->db_parent != NULL);
1134 if (db->db_blkid != DMU_SPILL_BLKID)
1135 ASSERT3P(db->db_blkptr, ==,
1136 &dn->dn_phys->dn_blkptr[db->db_blkid]);
1137 } else {
1138 /* db is pointed to by an indirect block */
1139 int epb __maybe_unused = db->db_parent->db.db_size >>
1140 SPA_BLKPTRSHIFT;
1141 ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
1142 ASSERT3U(db->db_parent->db.db_object, ==,
1143 db->db.db_object);
1144 /*
1145 * dnode_grow_indblksz() can make this fail if we don't
1146 * have the parent's rwlock. XXX indblksz no longer
1147 * grows. safe to do this now?
1148 */
1149 if (RW_LOCK_HELD(&db->db_parent->db_rwlock)) {
1150 ASSERT3P(db->db_blkptr, ==,
1151 ((blkptr_t *)db->db_parent->db.db_data +
1152 db->db_blkid % epb));
1153 }
1154 }
1155 }
1156 if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
1157 (db->db_buf == NULL || db->db_buf->b_data) &&
1158 db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
1159 db->db_state != DB_FILL && (dn == NULL || !dn->dn_free_txg)) {
1160 /*
1161 * If the blkptr isn't set but they have nonzero data,
1162 * it had better be dirty, otherwise we'll lose that
1163 * data when we evict this buffer.
1164 *
1165 * There is an exception to this rule for indirect blocks; in
1166 * this case, if the indirect block is a hole, we fill in a few
1167 * fields on each of the child blocks (importantly, birth time)
1168 * to prevent hole birth times from being lost when you
1169 * partially fill in a hole.
1170 */
1171 if (db->db_dirtycnt == 0) {
1172 if (db->db_level == 0) {
1173 uint64_t *buf = db->db.db_data;
1174 int i;
1175
1176 for (i = 0; i < db->db.db_size >> 3; i++) {
1177 ASSERT(buf[i] == 0);
1178 }
1179 } else {
1180 blkptr_t *bps = db->db.db_data;
1181 ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
1182 db->db.db_size);
1183 /*
1184 * We want to verify that all the blkptrs in the
1185 * indirect block are holes, but we may have
1186 * automatically set up a few fields for them.
1187 * We iterate through each blkptr and verify
1188 * they only have those fields set.
1189 */
1190 for (int i = 0;
1191 i < db->db.db_size / sizeof (blkptr_t);
1192 i++) {
1193 blkptr_t *bp = &bps[i];
1194 ASSERT(ZIO_CHECKSUM_IS_ZERO(
1195 &bp->blk_cksum));
1196 ASSERT(
1197 DVA_IS_EMPTY(&bp->blk_dva[0]) &&
1198 DVA_IS_EMPTY(&bp->blk_dva[1]) &&
1199 DVA_IS_EMPTY(&bp->blk_dva[2]));
1200 ASSERT0(bp->blk_fill);
1201 ASSERT0(bp->blk_pad[0]);
1202 ASSERT0(bp->blk_pad[1]);
1203 ASSERT(!BP_IS_EMBEDDED(bp));
1204 ASSERT(BP_IS_HOLE(bp));
1205 ASSERT0(bp->blk_phys_birth);
1206 }
1207 }
1208 }
1209 }
1210 DB_DNODE_EXIT(db);
1211 }
1212 #endif
1213
1214 static void
1215 dbuf_clear_data(dmu_buf_impl_t *db)
1216 {
1217 ASSERT(MUTEX_HELD(&db->db_mtx));
1218 dbuf_evict_user(db);
1219 ASSERT3P(db->db_buf, ==, NULL);
1220 db->db.db_data = NULL;
1221 if (db->db_state != DB_NOFILL) {
1222 db->db_state = DB_UNCACHED;
1223 DTRACE_SET_STATE(db, "clear data");
1224 }
1225 }
1226
1227 static void
1228 dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
1229 {
1230 ASSERT(MUTEX_HELD(&db->db_mtx));
1231 ASSERT(buf != NULL);
1232
1233 db->db_buf = buf;
1234 ASSERT(buf->b_data != NULL);
1235 db->db.db_data = buf->b_data;
1236 }
1237
1238 static arc_buf_t *
1239 dbuf_alloc_arcbuf(dmu_buf_impl_t *db)
1240 {
1241 spa_t *spa = db->db_objset->os_spa;
1242
1243 return (arc_alloc_buf(spa, db, DBUF_GET_BUFC_TYPE(db), db->db.db_size));
1244 }
1245
1246 /*
1247 * Loan out an arc_buf for read. Return the loaned arc_buf.
1248 */
1249 arc_buf_t *
1250 dbuf_loan_arcbuf(dmu_buf_impl_t *db)
1251 {
1252 arc_buf_t *abuf;
1253
1254 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1255 mutex_enter(&db->db_mtx);
1256 if (arc_released(db->db_buf) || zfs_refcount_count(&db->db_holds) > 1) {
1257 int blksz = db->db.db_size;
1258 spa_t *spa = db->db_objset->os_spa;
1259
1260 mutex_exit(&db->db_mtx);
1261 abuf = arc_loan_buf(spa, B_FALSE, blksz);
1262 memcpy(abuf->b_data, db->db.db_data, blksz);
1263 } else {
1264 abuf = db->db_buf;
1265 arc_loan_inuse_buf(abuf, db);
1266 db->db_buf = NULL;
1267 dbuf_clear_data(db);
1268 mutex_exit(&db->db_mtx);
1269 }
1270 return (abuf);
1271 }
1272
1273 /*
1274 * Calculate which level n block references the data at the level 0 offset
1275 * provided.
1276 */
1277 uint64_t
1278 dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
1279 {
1280 if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
1281 /*
1282 * The level n blkid is equal to the level 0 blkid divided by
1283 * the number of level 0s in a level n block.
1284 *
1285 * The level 0 blkid is offset >> datablkshift =
1286 * offset / 2^datablkshift.
1287 *
1288 * The number of level 0s in a level n is the number of block
1289 * pointers in an indirect block, raised to the power of level.
1290 * This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
1291 * 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
1292 *
1293 * Thus, the level n blkid is: offset /
1294 * ((2^datablkshift)*(2^(level*(indblkshift-SPA_BLKPTRSHIFT))))
1295 * = offset / 2^(datablkshift + level *
1296 * (indblkshift - SPA_BLKPTRSHIFT))
1297 * = offset >> (datablkshift + level *
1298 * (indblkshift - SPA_BLKPTRSHIFT))
1299 */
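		/*
		 * Worked example (assuming 128K data blocks and 128K indirect
		 * blocks): datablkshift = 17, indblkshift = 17, and
		 * SPA_BLKPTRSHIFT = 7, so each indirect block holds
		 * 2^10 = 1024 block pointers. For level = 1,
		 * exp = 17 + 1 * (17 - 7) = 27, and an offset of 1GB (2^30)
		 * maps to level-1 blkid 2^30 >> 27 = 8.
		 */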
1300
1301 const unsigned exp = dn->dn_datablkshift +
1302 level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
1303
1304 if (exp >= 8 * sizeof (offset)) {
1305 /* This only happens on the highest indirection level */
1306 ASSERT3U(level, ==, dn->dn_nlevels - 1);
1307 return (0);
1308 }
1309
1310 ASSERT3U(exp, <, 8 * sizeof (offset));
1311
1312 return (offset >> exp);
1313 } else {
1314 ASSERT3U(offset, <, dn->dn_datablksz);
1315 return (0);
1316 }
1317 }
1318
1319 /*
1320 * This function is used to lock the parent of the provided dbuf. This should be
1321 * used when modifying or reading db_blkptr.
1322 */
1323 db_lock_type_t
1324 dmu_buf_lock_parent(dmu_buf_impl_t *db, krw_t rw, const void *tag)
1325 {
1326 enum db_lock_type ret = DLT_NONE;
1327 if (db->db_parent != NULL) {
1328 rw_enter(&db->db_parent->db_rwlock, rw);
1329 ret = DLT_PARENT;
1330 } else if (dmu_objset_ds(db->db_objset) != NULL) {
1331 rrw_enter(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, rw,
1332 tag);
1333 ret = DLT_OBJSET;
1334 }
1335 /*
1336 * We only return a DLT_NONE lock when it's the top-most indirect block
1337 * of the meta-dnode of the MOS.
1338 */
1339 return (ret);
1340 }
1341
1342 /*
1343 * We need to pass the lock type in because it's possible that the block will
1344 * move from being the topmost indirect block in a dnode (and thus, have no
1345 * parent) to not the top-most via an indirection increase. This would cause a
1346 * panic if we didn't pass the lock type in.
1347 */
1348 void
1349 dmu_buf_unlock_parent(dmu_buf_impl_t *db, db_lock_type_t type, const void *tag)
1350 {
1351 if (type == DLT_PARENT)
1352 rw_exit(&db->db_parent->db_rwlock);
1353 else if (type == DLT_OBJSET)
1354 rrw_exit(&dmu_objset_ds(db->db_objset)->ds_bp_rwlock, tag);
1355 }
1356
1357 static void
1358 dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
1359 arc_buf_t *buf, void *vdb)
1360 {
1361 (void) zb, (void) bp;
1362 dmu_buf_impl_t *db = vdb;
1363
1364 mutex_enter(&db->db_mtx);
1365 ASSERT3U(db->db_state, ==, DB_READ);
1366 /*
1367 * All reads are synchronous, so we must have a hold on the dbuf
1368 */
1369 ASSERT(zfs_refcount_count(&db->db_holds) > 0);
1370 ASSERT(db->db_buf == NULL);
1371 ASSERT(db->db.db_data == NULL);
1372 if (buf == NULL) {
1373 /* i/o error */
1374 ASSERT(zio == NULL || zio->io_error != 0);
1375 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1376 ASSERT3P(db->db_buf, ==, NULL);
1377 db->db_state = DB_UNCACHED;
1378 DTRACE_SET_STATE(db, "i/o error");
1379 } else if (db->db_level == 0 && db->db_freed_in_flight) {
1380 /* freed in flight */
1381 ASSERT(zio == NULL || zio->io_error == 0);
1382 arc_release(buf, db);
1383 memset(buf->b_data, 0, db->db.db_size);
1384 arc_buf_freeze(buf);
1385 db->db_freed_in_flight = FALSE;
1386 dbuf_set_data(db, buf);
1387 db->db_state = DB_CACHED;
1388 DTRACE_SET_STATE(db, "freed in flight");
1389 } else {
1390 /* success */
1391 ASSERT(zio == NULL || zio->io_error == 0);
1392 dbuf_set_data(db, buf);
1393 db->db_state = DB_CACHED;
1394 DTRACE_SET_STATE(db, "successful read");
1395 }
1396 cv_broadcast(&db->db_changed);
1397 dbuf_rele_and_unlock(db, NULL, B_FALSE);
1398 }
1399
1400 /*
1401 * Shortcut for performing reads on bonus dbufs. Returns
1402 * an error if we fail to verify the dnode associated with
1403 * a decrypted block. Otherwise success.
1404 */
1405 static int
1406 dbuf_read_bonus(dmu_buf_impl_t *db, dnode_t *dn, uint32_t flags)
1407 {
1408 int bonuslen, max_bonuslen, err;
1409
1410 err = dbuf_read_verify_dnode_crypt(db, flags);
1411 if (err)
1412 return (err);
1413
1414 bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
1415 max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1416 ASSERT(MUTEX_HELD(&db->db_mtx));
1417 ASSERT(DB_DNODE_HELD(db));
1418 ASSERT3U(bonuslen, <=, db->db.db_size);
1419 db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
1420 arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
1421 if (bonuslen < max_bonuslen)
1422 memset(db->db.db_data, 0, max_bonuslen);
1423 if (bonuslen)
1424 memcpy(db->db.db_data, DN_BONUS(dn->dn_phys), bonuslen);
1425 db->db_state = DB_CACHED;
1426 DTRACE_SET_STATE(db, "bonus buffer filled");
1427 return (0);
1428 }
1429
1430 static void
1431 dbuf_handle_indirect_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *dbbp)
1432 {
1433 blkptr_t *bps = db->db.db_data;
1434 uint32_t indbs = 1ULL << dn->dn_indblkshift;
1435 int n_bps = indbs >> SPA_BLKPTRSHIFT;
1436
1437 for (int i = 0; i < n_bps; i++) {
1438 blkptr_t *bp = &bps[i];
1439
1440 ASSERT3U(BP_GET_LSIZE(dbbp), ==, indbs);
1441 BP_SET_LSIZE(bp, BP_GET_LEVEL(dbbp) == 1 ?
1442 dn->dn_datablksz : BP_GET_LSIZE(dbbp));
1443 BP_SET_TYPE(bp, BP_GET_TYPE(dbbp));
1444 BP_SET_LEVEL(bp, BP_GET_LEVEL(dbbp) - 1);
1445 BP_SET_BIRTH(bp, dbbp->blk_birth, 0);
1446 }
1447 }
1448
1449 /*
1450 * Handle reads on dbufs that are holes, if necessary. This function
1451 * requires that the dbuf's mutex is held. Returns success (0) if action
1452 * was taken, ENOENT if no action was taken.
1453 */
1454 static int
1455 dbuf_read_hole(dmu_buf_impl_t *db, dnode_t *dn, blkptr_t *bp)
1456 {
1457 ASSERT(MUTEX_HELD(&db->db_mtx));
1458
1459 int is_hole = bp == NULL || BP_IS_HOLE(bp);
1460 /*
1461 * For level 0 blocks only, if the above check fails:
1462 * Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
1463 * processes the delete record and clears the bp while we are waiting
1464 * for the dn_mtx (resulting in a "no" from block_freed).
1465 */
1466 if (!is_hole && db->db_level == 0)
1467 is_hole = dnode_block_freed(dn, db->db_blkid) || BP_IS_HOLE(bp);
1468
1469 if (is_hole) {
1470 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1471 memset(db->db.db_data, 0, db->db.db_size);
1472
1473 if (bp != NULL && db->db_level > 0 && BP_IS_HOLE(bp) &&
1474 bp->blk_birth != 0) {
1475 dbuf_handle_indirect_hole(db, dn, bp);
1476 }
1477 db->db_state = DB_CACHED;
1478 DTRACE_SET_STATE(db, "hole read satisfied");
1479 return (0);
1480 }
1481 return (ENOENT);
1482 }
1483
1484 /*
1485 * This function ensures that, when doing a decrypting read of a block,
1486 * we make sure we have decrypted the dnode associated with it. We must do
1487 * this so that we ensure we are fully authenticating the checksum-of-MACs
1488 * tree from the root of the objset down to this block. Indirect blocks are
1489 * always verified against their secure checksum-of-MACs assuming that the
1490 * dnode containing them is correct. Now that we are doing a decrypting read,
1491 * we can be sure that the key is loaded and verify that assumption. This is
1492 * especially important considering that we always read encrypted dnode
1493 * blocks as raw data (without verifying their MACs) to start, and
1494 * decrypt / authenticate them when we need to read an encrypted bonus buffer.
1495 */
1496 static int
1497 dbuf_read_verify_dnode_crypt(dmu_buf_impl_t *db, uint32_t flags)
1498 {
1499 int err = 0;
1500 objset_t *os = db->db_objset;
1501 arc_buf_t *dnode_abuf;
1502 dnode_t *dn;
1503 zbookmark_phys_t zb;
1504
1505 ASSERT(MUTEX_HELD(&db->db_mtx));
1506
1507 if ((flags & DB_RF_NO_DECRYPT) != 0 ||
1508 !os->os_encrypted || os->os_raw_receive)
1509 return (0);
1510
1511 DB_DNODE_ENTER(db);
1512 dn = DB_DNODE(db);
1513 dnode_abuf = (dn->dn_dbuf != NULL) ? dn->dn_dbuf->db_buf : NULL;
1514
1515 if (dnode_abuf == NULL || !arc_is_encrypted(dnode_abuf)) {
1516 DB_DNODE_EXIT(db);
1517 return (0);
1518 }
1519
1520 SET_BOOKMARK(&zb, dmu_objset_id(os),
1521 DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
1522 err = arc_untransform(dnode_abuf, os->os_spa, &zb, B_TRUE);
1523
1524 /*
1525 * An error code of EACCES tells us that the key is still not
1526 * available. This is ok if we are only reading authenticated
1527 * (and therefore non-encrypted) blocks.
1528 */
1529 if (err == EACCES && ((db->db_blkid != DMU_BONUS_BLKID &&
1530 !DMU_OT_IS_ENCRYPTED(dn->dn_type)) ||
1531 (db->db_blkid == DMU_BONUS_BLKID &&
1532 !DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))))
1533 err = 0;
1534
1535 DB_DNODE_EXIT(db);
1536
1537 return (err);
1538 }
1539
1540 /*
1541 * Drops db_mtx and the parent lock specified by dblt and tag before
1542 * returning.
1543 */
1544 static int
1545 dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags,
1546 db_lock_type_t dblt, const void *tag)
1547 {
1548 dnode_t *dn;
1549 zbookmark_phys_t zb;
1550 uint32_t aflags = ARC_FLAG_NOWAIT;
1551 int err, zio_flags;
1552 blkptr_t bp, *bpp;
1553
1554 DB_DNODE_ENTER(db);
1555 dn = DB_DNODE(db);
1556 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1557 ASSERT(MUTEX_HELD(&db->db_mtx));
1558 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
1559 ASSERT(db->db_buf == NULL);
1560 ASSERT(db->db_parent == NULL ||
1561 RW_LOCK_HELD(&db->db_parent->db_rwlock));
1562
1563 if (db->db_blkid == DMU_BONUS_BLKID) {
1564 err = dbuf_read_bonus(db, dn, flags);
1565 goto early_unlock;
1566 }
1567
1568 if (db->db_state == DB_UNCACHED) {
1569 if (db->db_blkptr == NULL) {
1570 bpp = NULL;
1571 } else {
1572 bp = *db->db_blkptr;
1573 bpp = &bp;
1574 }
1575 } else {
1576 struct dirty_leaf *dl;
1577 dbuf_dirty_record_t *dr;
1578
1579 ASSERT3S(db->db_state, ==, DB_NOFILL);
1580
1581 dr = list_head(&db->db_dirty_records);
1582 if (dr == NULL) {
1583 err = EIO;
1584 goto early_unlock;
1585 } else {
1586 dl = &dr->dt.dl;
1587 if (!dl->dr_brtwrite) {
1588 err = EIO;
1589 goto early_unlock;
1590 }
1591 bp = dl->dr_overridden_by;
1592 bpp = &bp;
1593 }
1594 }
1595
1596 err = dbuf_read_hole(db, dn, bpp);
1597 if (err == 0)
1598 goto early_unlock;
1599
1600 ASSERT(bpp != NULL);
1601
1602 /*
1603 * Any attempt to read a redacted block should result in an error. This
1604 * will never happen under normal conditions, but can be useful for
1605 * debugging purposes.
1606 */
1607 if (BP_IS_REDACTED(bpp)) {
1608 ASSERT(dsl_dataset_feature_is_active(
1609 db->db_objset->os_dsl_dataset,
1610 SPA_FEATURE_REDACTED_DATASETS));
1611 err = SET_ERROR(EIO);
1612 goto early_unlock;
1613 }
1614
1615 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1616 db->db.db_object, db->db_level, db->db_blkid);
1617
1618 /*
1619 * All bps of an encrypted os should have the encryption bit set.
1620 * If this is not true it indicates tampering and we report an error.
1621 */
1622 if (db->db_objset->os_encrypted && !BP_USES_CRYPT(bpp)) {
1623 spa_log_error(db->db_objset->os_spa, &zb,
1624 &db->db_blkptr->blk_birth);
1625 zfs_panic_recover("unencrypted block in encrypted "
1626 "object set %llu", dmu_objset_id(db->db_objset));
1627 err = SET_ERROR(EIO);
1628 goto early_unlock;
1629 }
1630
1631 err = dbuf_read_verify_dnode_crypt(db, flags);
1632 if (err != 0)
1633 goto early_unlock;
1634
1635 DB_DNODE_EXIT(db);
1636
1637 db->db_state = DB_READ;
1638 DTRACE_SET_STATE(db, "read issued");
1639 mutex_exit(&db->db_mtx);
1640
1641 if (!DBUF_IS_CACHEABLE(db))
1642 aflags |= ARC_FLAG_UNCACHED;
1643 else if (dbuf_is_l2cacheable(db))
1644 aflags |= ARC_FLAG_L2CACHE;
1645
1646 dbuf_add_ref(db, NULL);
1647
1648 zio_flags = (flags & DB_RF_CANFAIL) ?
1649 ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
1650
1651 if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
1652 zio_flags |= ZIO_FLAG_RAW;
1653 /*
1654 * The zio layer will copy the provided blkptr later, but we have our
1655 * own copy so that we can release the parent's rwlock. We have to
1656 * do that so that if dbuf_read_done is called synchronously (on
1657 * an l1 cache hit) we don't acquire the db_mtx while holding the
1658 * parent's rwlock, which would be a lock ordering violation.
1659 */
1660 dmu_buf_unlock_parent(db, dblt, tag);
1661 (void) arc_read(zio, db->db_objset->os_spa, bpp,
1662 dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
1663 &aflags, &zb);
1664 return (err);
1665 early_unlock:
1666 DB_DNODE_EXIT(db);
1667 mutex_exit(&db->db_mtx);
1668 dmu_buf_unlock_parent(db, dblt, tag);
1669 return (err);
1670 }
1671
1672 /*
1673 * This is our just-in-time copy function. It makes a copy of buffers that
1674 * have been modified in a previous transaction group before we access them in
1675 * the current active group.
1676 *
1677 * This function is used in three places: when we are dirtying a buffer for the
1678 * first time in a txg, when we are freeing a range in a dnode that includes
1679 * this buffer, and when we are accessing a buffer which was received compressed
1680 * and later referenced in a WRITE_BYREF record.
1681 *
1682 * Note that when we are called from dbuf_free_range() we do not put a hold on
1683 * the buffer, we just traverse the active dbuf list for the dnode.
1684 */
1685 static void
1686 dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
1687 {
1688 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
1689
1690 ASSERT(MUTEX_HELD(&db->db_mtx));
1691 ASSERT(db->db.db_data != NULL);
1692 ASSERT(db->db_level == 0);
1693 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
1694
1695 if (dr == NULL ||
1696 (dr->dt.dl.dr_data !=
1697 ((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
1698 return;
1699
1700 /*
1701 * If the last dirty record for this dbuf has not yet synced
1702  * and it's referencing the dbuf data, either:
1703  * reset the reference to point to a new copy,
1704  * or (if there are no active holders)
1705 * just null out the current db_data pointer.
1706 */
1707 ASSERT3U(dr->dr_txg, >=, txg - 2);
1708 if (db->db_blkid == DMU_BONUS_BLKID) {
1709 dnode_t *dn = DB_DNODE(db);
1710 int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
1711 dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
1712 arc_space_consume(bonuslen, ARC_SPACE_BONUS);
1713 memcpy(dr->dt.dl.dr_data, db->db.db_data, bonuslen);
1714 } else if (zfs_refcount_count(&db->db_holds) > db->db_dirtycnt) {
1715 dnode_t *dn = DB_DNODE(db);
1716 int size = arc_buf_size(db->db_buf);
1717 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
1718 spa_t *spa = db->db_objset->os_spa;
1719 enum zio_compress compress_type =
1720 arc_get_compression(db->db_buf);
1721 uint8_t complevel = arc_get_complevel(db->db_buf);
1722
1723 if (arc_is_encrypted(db->db_buf)) {
1724 boolean_t byteorder;
1725 uint8_t salt[ZIO_DATA_SALT_LEN];
1726 uint8_t iv[ZIO_DATA_IV_LEN];
1727 uint8_t mac[ZIO_DATA_MAC_LEN];
1728
1729 arc_get_raw_params(db->db_buf, &byteorder, salt,
1730 iv, mac);
1731 dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
1732 dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
1733 mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
1734 compress_type, complevel);
1735 } else if (compress_type != ZIO_COMPRESS_OFF) {
1736 ASSERT3U(type, ==, ARC_BUFC_DATA);
1737 dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
1738 size, arc_buf_lsize(db->db_buf), compress_type,
1739 complevel);
1740 } else {
1741 dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
1742 }
1743 memcpy(dr->dt.dl.dr_data->b_data, db->db.db_data, size);
1744 } else {
1745 db->db_buf = NULL;
1746 dbuf_clear_data(db);
1747 }
1748 }
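
/*
 * Illustrative sketch of how this just-in-time copy is reached from the
 * dirty path (see dbuf_dirty() below) for a level-0, non-bonus buffer; the
 * copy is only made when the previous txg's dirty record still points at
 * the live db_buf:
 *
 *	arc_release(db->db_buf, db);		// detach from shared ARC state
 *	dbuf_fix_old_data(db, tx->tx_txg);	// give the old record its own copy
 *	data_old = db->db_buf;			// current txg keeps the live buffer
 */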
1749
1750 int
1751 dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
1752 {
1753 int err = 0;
1754 boolean_t prefetch;
1755 dnode_t *dn;
1756
1757 /*
1758 * We don't have to hold the mutex to check db_state because it
1759 * can't be freed while we have a hold on the buffer.
1760 */
1761 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1762
1763 DB_DNODE_ENTER(db);
1764 dn = DB_DNODE(db);
1765
1766 prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
1767 (flags & DB_RF_NOPREFETCH) == 0 && dn != NULL;
1768
1769 mutex_enter(&db->db_mtx);
1770 if (flags & DB_RF_PARTIAL_FIRST)
1771 db->db_partial_read = B_TRUE;
1772 else if (!(flags & DB_RF_PARTIAL_MORE))
1773 db->db_partial_read = B_FALSE;
1774 if (db->db_state == DB_CACHED) {
1775 /*
1776 * Ensure that this block's dnode has been decrypted if
1777 * the caller has requested decrypted data.
1778 */
1779 err = dbuf_read_verify_dnode_crypt(db, flags);
1780
1781 /*
1782 * If the arc buf is compressed or encrypted and the caller
1783 * requested uncompressed data, we need to untransform it
1784 * before returning. We also call arc_untransform() on any
1785 * unauthenticated blocks, which will verify their MAC if
1786 * the key is now available.
1787 */
1788 if (err == 0 && db->db_buf != NULL &&
1789 (flags & DB_RF_NO_DECRYPT) == 0 &&
1790 (arc_is_encrypted(db->db_buf) ||
1791 arc_is_unauthenticated(db->db_buf) ||
1792 arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
1793 spa_t *spa = dn->dn_objset->os_spa;
1794 zbookmark_phys_t zb;
1795
1796 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
1797 db->db.db_object, db->db_level, db->db_blkid);
1798 dbuf_fix_old_data(db, spa_syncing_txg(spa));
1799 err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
1800 dbuf_set_data(db, db->db_buf);
1801 }
1802 mutex_exit(&db->db_mtx);
1803 if (err == 0 && prefetch) {
1804 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1805 B_FALSE, flags & DB_RF_HAVESTRUCT);
1806 }
1807 DB_DNODE_EXIT(db);
1808 DBUF_STAT_BUMP(hash_hits);
1809 } else if (db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL) {
1810 boolean_t need_wait = B_FALSE;
1811
1812 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
1813
1814 if (zio == NULL && (db->db_state == DB_NOFILL ||
1815 (db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)))) {
1816 spa_t *spa = dn->dn_objset->os_spa;
1817 zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
1818 need_wait = B_TRUE;
1819 }
1820 err = dbuf_read_impl(db, zio, flags, dblt, FTAG);
1821 /*
1822 * dbuf_read_impl has dropped db_mtx and our parent's rwlock
1823 * for us
1824 */
1825 if (!err && prefetch) {
1826 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1827 db->db_state != DB_CACHED,
1828 flags & DB_RF_HAVESTRUCT);
1829 }
1830
1831 DB_DNODE_EXIT(db);
1832 DBUF_STAT_BUMP(hash_misses);
1833
1834 /*
1835 * If we created a zio_root we must execute it to avoid
1836 * leaking it, even if it isn't attached to any work due
1837 * to an error in dbuf_read_impl().
1838 */
1839 if (need_wait) {
1840 if (err == 0)
1841 err = zio_wait(zio);
1842 else
1843 VERIFY0(zio_wait(zio));
1844 }
1845 } else {
1846 /*
1847 * Another reader came in while the dbuf was in flight
1848 * between UNCACHED and CACHED. Either a writer will finish
1849 * writing the buffer (sending the dbuf to CACHED) or the
1850 * first reader's request will reach the read_done callback
1851 * and send the dbuf to CACHED. Otherwise, a failure
1852 * occurred and the dbuf went to UNCACHED.
1853 */
1854 mutex_exit(&db->db_mtx);
1855 if (prefetch) {
1856 dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE,
1857 B_TRUE, flags & DB_RF_HAVESTRUCT);
1858 }
1859 DB_DNODE_EXIT(db);
1860 DBUF_STAT_BUMP(hash_misses);
1861
1862 /* Skip the wait per the caller's request. */
1863 if ((flags & DB_RF_NEVERWAIT) == 0) {
1864 mutex_enter(&db->db_mtx);
1865 while (db->db_state == DB_READ ||
1866 db->db_state == DB_FILL) {
1867 ASSERT(db->db_state == DB_READ ||
1868 (flags & DB_RF_HAVESTRUCT) == 0);
1869 DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
1870 db, zio_t *, zio);
1871 cv_wait(&db->db_changed, &db->db_mtx);
1872 }
1873 if (db->db_state == DB_UNCACHED)
1874 err = SET_ERROR(EIO);
1875 mutex_exit(&db->db_mtx);
1876 }
1877 }
1878
1879 return (err);
1880 }
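
/*
 * Illustrative sketch of a typical caller: read a held dbuf without
 * triggering prefetch and tolerate I/O errors, matching the usage in
 * dbuf_dirty_lightweight() and dbuf_findbp() later in this file:
 *
 *	int err = dbuf_read(db, NULL, DB_RF_NOPREFETCH | DB_RF_CANFAIL);
 *	if (err != 0) {
 *		dbuf_rele(db, FTAG);	// drop the hold on failure
 *		return (err);
 *	}
 */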
1881
1882 static void
1883 dbuf_noread(dmu_buf_impl_t *db)
1884 {
1885 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
1886 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1887 mutex_enter(&db->db_mtx);
1888 while (db->db_state == DB_READ || db->db_state == DB_FILL)
1889 cv_wait(&db->db_changed, &db->db_mtx);
1890 if (db->db_state == DB_UNCACHED) {
1891 ASSERT(db->db_buf == NULL);
1892 ASSERT(db->db.db_data == NULL);
1893 dbuf_set_data(db, dbuf_alloc_arcbuf(db));
1894 db->db_state = DB_FILL;
1895 DTRACE_SET_STATE(db, "assigning filled buffer");
1896 } else if (db->db_state == DB_NOFILL) {
1897 dbuf_clear_data(db);
1898 } else {
1899 ASSERT3U(db->db_state, ==, DB_CACHED);
1900 }
1901 mutex_exit(&db->db_mtx);
1902 }
1903
1904 void
1905 dbuf_unoverride(dbuf_dirty_record_t *dr)
1906 {
1907 dmu_buf_impl_t *db = dr->dr_dbuf;
1908 blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
1909 uint64_t txg = dr->dr_txg;
1910
1911 ASSERT(MUTEX_HELD(&db->db_mtx));
1912 /*
1913 * This assert is valid because dmu_sync() expects to be called by
1914 * a zilog's get_data while holding a range lock. This call only
1915 * comes from dbuf_dirty() callers who must also hold a range lock.
1916 */
1917 ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
1918 ASSERT(db->db_level == 0);
1919
1920 if (db->db_blkid == DMU_BONUS_BLKID ||
1921 dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
1922 return;
1923
1924 ASSERT(db->db_data_pending != dr);
1925
1926 /* free this block */
1927 if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
1928 zio_free(db->db_objset->os_spa, txg, bp);
1929
1930 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
1931 dr->dt.dl.dr_nopwrite = B_FALSE;
1932 dr->dt.dl.dr_has_raw_params = B_FALSE;
1933
1934 /*
1935 * Release the already-written buffer, so we leave it in
1936 * a consistent dirty state. Note that all callers are
1937 * modifying the buffer, so they will immediately do
1938 * another (redundant) arc_release(). Therefore, leave
1939 * the buf thawed to save the effort of freezing &
1940 * immediately re-thawing it.
1941 */
1942 if (!dr->dt.dl.dr_brtwrite)
1943 arc_release(dr->dt.dl.dr_data, db);
1944 }
1945
1946 /*
1947 * Evict (if it's unreferenced) or clear (if it's referenced) any level-0
1948 * data blocks in the free range, so that any future readers will find
1949 * empty blocks.
1950 */
1951 void
1952 dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
1953 dmu_tx_t *tx)
1954 {
1955 dmu_buf_impl_t *db_search;
1956 dmu_buf_impl_t *db, *db_next;
1957 uint64_t txg = tx->tx_txg;
1958 avl_index_t where;
1959 dbuf_dirty_record_t *dr;
1960
1961 if (end_blkid > dn->dn_maxblkid &&
1962 !(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
1963 end_blkid = dn->dn_maxblkid;
1964 dprintf_dnode(dn, "start=%llu end=%llu\n", (u_longlong_t)start_blkid,
1965 (u_longlong_t)end_blkid);
1966
1967 db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
1968 db_search->db_level = 0;
1969 db_search->db_blkid = start_blkid;
1970 db_search->db_state = DB_SEARCH;
1971
1972 mutex_enter(&dn->dn_dbufs_mtx);
1973 db = avl_find(&dn->dn_dbufs, db_search, &where);
1974 ASSERT3P(db, ==, NULL);
1975
1976 db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
1977
1978 for (; db != NULL; db = db_next) {
1979 db_next = AVL_NEXT(&dn->dn_dbufs, db);
1980 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
1981
1982 if (db->db_level != 0 || db->db_blkid > end_blkid) {
1983 break;
1984 }
1985 ASSERT3U(db->db_blkid, >=, start_blkid);
1986
1987 /* found a level 0 buffer in the range */
1988 mutex_enter(&db->db_mtx);
1989 if (dbuf_undirty(db, tx)) {
1990 /* mutex has been dropped and dbuf destroyed */
1991 continue;
1992 }
1993
1994 if (db->db_state == DB_UNCACHED ||
1995 db->db_state == DB_NOFILL ||
1996 db->db_state == DB_EVICTING) {
1997 ASSERT(db->db.db_data == NULL);
1998 mutex_exit(&db->db_mtx);
1999 continue;
2000 }
2001 if (db->db_state == DB_READ || db->db_state == DB_FILL) {
2002 /* will be handled in dbuf_read_done or dbuf_rele */
2003 db->db_freed_in_flight = TRUE;
2004 mutex_exit(&db->db_mtx);
2005 continue;
2006 }
2007 if (zfs_refcount_count(&db->db_holds) == 0) {
2008 ASSERT(db->db_buf);
2009 dbuf_destroy(db);
2010 continue;
2011 }
2012 /* The dbuf is referenced */
2013
2014 dr = list_head(&db->db_dirty_records);
2015 if (dr != NULL) {
2016 if (dr->dr_txg == txg) {
2017 /*
2018 * This buffer is "in-use"; re-adjust the file
2019 * size to reflect that this buffer may
2020 * contain new data when we sync.
2021 */
2022 if (db->db_blkid != DMU_SPILL_BLKID &&
2023 db->db_blkid > dn->dn_maxblkid)
2024 dn->dn_maxblkid = db->db_blkid;
2025 dbuf_unoverride(dr);
2026 if (dr->dt.dl.dr_brtwrite) {
2027 ASSERT(db->db.db_data == NULL);
2028 mutex_exit(&db->db_mtx);
2029 continue;
2030 }
2031 } else {
2032 /*
2033 * This dbuf is not dirty in the open context.
2034 * Either uncache it (if it's not referenced in
2035 * the open context) or reset its contents to
2036 * empty.
2037 */
2038 dbuf_fix_old_data(db, txg);
2039 }
2040 }
2041 /* clear the contents if it's cached */
2042 if (db->db_state == DB_CACHED) {
2043 ASSERT(db->db.db_data != NULL);
2044 arc_release(db->db_buf, db);
2045 rw_enter(&db->db_rwlock, RW_WRITER);
2046 memset(db->db.db_data, 0, db->db.db_size);
2047 rw_exit(&db->db_rwlock);
2048 arc_buf_freeze(db->db_buf);
2049 }
2050
2051 mutex_exit(&db->db_mtx);
2052 }
2053
2054 mutex_exit(&dn->dn_dbufs_mtx);
2055 kmem_free(db_search, sizeof (dmu_buf_impl_t));
2056 }
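
/*
 * Illustrative sketch, assuming a caller that is freeing a block range of a
 * held dnode inside an assigned transaction; blkids are in units of the
 * dnode's data block size, and DMU_SPILL_BLKID may be passed to cover the
 * spill block:
 *
 *	dbuf_free_range(dn, start_blkid, end_blkid, tx);
 */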
2057
2058 void
2059 dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
2060 {
2061 arc_buf_t *buf, *old_buf;
2062 dbuf_dirty_record_t *dr;
2063 int osize = db->db.db_size;
2064 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
2065 dnode_t *dn;
2066
2067 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2068
2069 DB_DNODE_ENTER(db);
2070 dn = DB_DNODE(db);
2071
2072 /*
2073 * XXX we should be doing a dbuf_read, checking the return
2074 * value and returning that up to our callers
2075 */
2076 dmu_buf_will_dirty(&db->db, tx);
2077
2078 /* create the data buffer for the new block */
2079 buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
2080
2081 /* copy old block data to the new block */
2082 old_buf = db->db_buf;
2083 memcpy(buf->b_data, old_buf->b_data, MIN(osize, size));
2084 /* zero the remainder */
2085 if (size > osize)
2086 memset((uint8_t *)buf->b_data + osize, 0, size - osize);
2087
2088 mutex_enter(&db->db_mtx);
2089 dbuf_set_data(db, buf);
2090 arc_buf_destroy(old_buf, db);
2091 db->db.db_size = size;
2092
2093 dr = list_head(&db->db_dirty_records);
2094 /* dirty record added by dmu_buf_will_dirty() */
2095 VERIFY(dr != NULL);
2096 if (db->db_level == 0)
2097 dr->dt.dl.dr_data = buf;
2098 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2099 ASSERT3U(dr->dr_accounted, ==, osize);
2100 dr->dr_accounted = size;
2101 mutex_exit(&db->db_mtx);
2102
2103 dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
2104 DB_DNODE_EXIT(db);
2105 }
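
/*
 * Illustrative sketch: growing a dbuf inside an open transaction preserves
 * the old contents, zeroes the tail, and charges the new size to the
 * transaction via dmu_objset_willuse_space():
 *
 *	dbuf_new_size(db, new_size, tx);	// new_size > db->db.db_size grows the block
 */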
2106
2107 void
2108 dbuf_release_bp(dmu_buf_impl_t *db)
2109 {
2110 objset_t *os __maybe_unused = db->db_objset;
2111
2112 ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
2113 ASSERT(arc_released(os->os_phys_buf) ||
2114 list_link_active(&os->os_dsl_dataset->ds_synced_link));
2115 ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
2116
2117 (void) arc_release(db->db_buf, db);
2118 }
2119
2120 /*
2121 * We already have a dirty record for this TXG, and we are being
2122 * dirtied again.
2123 */
2124 static void
2125 dbuf_redirty(dbuf_dirty_record_t *dr)
2126 {
2127 dmu_buf_impl_t *db = dr->dr_dbuf;
2128
2129 ASSERT(MUTEX_HELD(&db->db_mtx));
2130
2131 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
2132 /*
2133 * If this buffer has already been written out,
2134 * we now need to reset its state.
2135 */
2136 dbuf_unoverride(dr);
2137 if (db->db.db_object != DMU_META_DNODE_OBJECT &&
2138 db->db_state != DB_NOFILL) {
2139 /* Already released on initial dirty, so just thaw. */
2140 ASSERT(arc_released(db->db_buf));
2141 arc_buf_thaw(db->db_buf);
2142 }
2143 }
2144 }
2145
2146 dbuf_dirty_record_t *
2147 dbuf_dirty_lightweight(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx)
2148 {
2149 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2150 IMPLY(dn->dn_objset->os_raw_receive, dn->dn_maxblkid >= blkid);
2151 dnode_new_blkid(dn, blkid, tx, B_TRUE, B_FALSE);
2152 ASSERT(dn->dn_maxblkid >= blkid);
2153
2154 dbuf_dirty_record_t *dr = kmem_zalloc(sizeof (*dr), KM_SLEEP);
2155 list_link_init(&dr->dr_dirty_node);
2156 list_link_init(&dr->dr_dbuf_node);
2157 dr->dr_dnode = dn;
2158 dr->dr_txg = tx->tx_txg;
2159 dr->dt.dll.dr_blkid = blkid;
2160 dr->dr_accounted = dn->dn_datablksz;
2161
2162 /*
2163 * There should not be any dbuf for the block that we're dirtying.
2164 * Otherwise the buffer contents could be inconsistent between the
2165 * dbuf and the lightweight dirty record.
2166 */
2167 ASSERT3P(NULL, ==, dbuf_find(dn->dn_objset, dn->dn_object, 0, blkid,
2168 NULL));
2169
2170 mutex_enter(&dn->dn_mtx);
2171 int txgoff = tx->tx_txg & TXG_MASK;
2172 if (dn->dn_free_ranges[txgoff] != NULL) {
2173 range_tree_clear(dn->dn_free_ranges[txgoff], blkid, 1);
2174 }
2175
2176 if (dn->dn_nlevels == 1) {
2177 ASSERT3U(blkid, <, dn->dn_nblkptr);
2178 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2179 mutex_exit(&dn->dn_mtx);
2180 rw_exit(&dn->dn_struct_rwlock);
2181 dnode_setdirty(dn, tx);
2182 } else {
2183 mutex_exit(&dn->dn_mtx);
2184
2185 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2186 dmu_buf_impl_t *parent_db = dbuf_hold_level(dn,
2187 1, blkid >> epbs, FTAG);
2188 rw_exit(&dn->dn_struct_rwlock);
2189 if (parent_db == NULL) {
2190 kmem_free(dr, sizeof (*dr));
2191 return (NULL);
2192 }
2193 int err = dbuf_read(parent_db, NULL,
2194 (DB_RF_NOPREFETCH | DB_RF_CANFAIL));
2195 if (err != 0) {
2196 dbuf_rele(parent_db, FTAG);
2197 kmem_free(dr, sizeof (*dr));
2198 return (NULL);
2199 }
2200
2201 dbuf_dirty_record_t *parent_dr = dbuf_dirty(parent_db, tx);
2202 dbuf_rele(parent_db, FTAG);
2203 mutex_enter(&parent_dr->dt.di.dr_mtx);
2204 ASSERT3U(parent_dr->dr_txg, ==, tx->tx_txg);
2205 list_insert_tail(&parent_dr->dt.di.dr_children, dr);
2206 mutex_exit(&parent_dr->dt.di.dr_mtx);
2207 dr->dr_parent = parent_dr;
2208 }
2209
2210 dmu_objset_willuse_space(dn->dn_objset, dr->dr_accounted, tx);
2211
2212 return (dr);
2213 }
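
/*
 * Illustrative sketch of the lightweight dirty path; when NULL is returned
 * (the parent indirect could not be held or read), the caller is assumed to
 * fall back to the regular dbuf_hold()/dbuf_dirty() path:
 *
 *	dbuf_dirty_record_t *dr = dbuf_dirty_lightweight(dn, blkid, tx);
 *	if (dr == NULL) {
 *		// fall back to holding and dirtying the dbuf itself
 *	}
 */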
2214
2215 dbuf_dirty_record_t *
2216 dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2217 {
2218 dnode_t *dn;
2219 objset_t *os;
2220 dbuf_dirty_record_t *dr, *dr_next, *dr_head;
2221 int txgoff = tx->tx_txg & TXG_MASK;
2222 boolean_t drop_struct_rwlock = B_FALSE;
2223
2224 ASSERT(tx->tx_txg != 0);
2225 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2226 DMU_TX_DIRTY_BUF(tx, db);
2227
2228 DB_DNODE_ENTER(db);
2229 dn = DB_DNODE(db);
2230 /*
2231 * Shouldn't dirty a regular buffer in syncing context. Private
2232 * objects may be dirtied in syncing context, but only if they
2233 * were already pre-dirtied in open context.
2234 */
2235 #ifdef ZFS_DEBUG
2236 if (dn->dn_objset->os_dsl_dataset != NULL) {
2237 rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
2238 RW_READER, FTAG);
2239 }
2240 ASSERT(!dmu_tx_is_syncing(tx) ||
2241 BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
2242 DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2243 dn->dn_objset->os_dsl_dataset == NULL);
2244 if (dn->dn_objset->os_dsl_dataset != NULL)
2245 rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
2246 #endif
2247 /*
2248 * We make this assert for private objects as well, but after we
2249 * check if we're already dirty. They are allowed to re-dirty
2250 * in syncing context.
2251 */
2252 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
2253 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2254 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2255
2256 mutex_enter(&db->db_mtx);
2257 /*
2258 * XXX make this true for indirects too? The problem is that
2259 * transactions created with dmu_tx_create_assigned() from
2260 * syncing context don't bother holding ahead.
2261 */
2262 ASSERT(db->db_level != 0 ||
2263 db->db_state == DB_CACHED || db->db_state == DB_FILL ||
2264 db->db_state == DB_NOFILL);
2265
2266 mutex_enter(&dn->dn_mtx);
2267 dnode_set_dirtyctx(dn, tx, db);
2268 if (tx->tx_txg > dn->dn_dirty_txg)
2269 dn->dn_dirty_txg = tx->tx_txg;
2270 mutex_exit(&dn->dn_mtx);
2271
2272 if (db->db_blkid == DMU_SPILL_BLKID)
2273 dn->dn_have_spill = B_TRUE;
2274
2275 /*
2276 * If this buffer is already dirty, we're done.
2277 */
2278 dr_head = list_head(&db->db_dirty_records);
2279 ASSERT(dr_head == NULL || dr_head->dr_txg <= tx->tx_txg ||
2280 db->db.db_object == DMU_META_DNODE_OBJECT);
2281 dr_next = dbuf_find_dirty_lte(db, tx->tx_txg);
2282 if (dr_next && dr_next->dr_txg == tx->tx_txg) {
2283 DB_DNODE_EXIT(db);
2284
2285 dbuf_redirty(dr_next);
2286 mutex_exit(&db->db_mtx);
2287 return (dr_next);
2288 }
2289
2290 /*
2291 * Only valid if not already dirty.
2292 */
2293 ASSERT(dn->dn_object == 0 ||
2294 dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
2295 (dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
2296
2297 ASSERT3U(dn->dn_nlevels, >, db->db_level);
2298
2299 /*
2300 * We should only be dirtying in syncing context if it's the
2301 * mos or we're initializing the os or it's a special object.
2302 * However, we are allowed to dirty in syncing context provided
2303 * we already dirtied it in open context. Hence we must make
2304 * this assertion only if we're not already dirty.
2305 */
2306 os = dn->dn_objset;
2307 VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
2308 #ifdef ZFS_DEBUG
2309 if (dn->dn_objset->os_dsl_dataset != NULL)
2310 rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
2311 ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
2312 os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
2313 if (dn->dn_objset->os_dsl_dataset != NULL)
2314 rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
2315 #endif
2316 ASSERT(db->db.db_size != 0);
2317
2318 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2319
2320 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2321 dmu_objset_willuse_space(os, db->db.db_size, tx);
2322 }
2323
2324 /*
2325 * If this buffer is dirty in an old transaction group we need
2326 * to make a copy of it so that the changes we make in this
2327 * transaction group won't leak out when we sync the older txg.
2328 */
2329 dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
2330 list_link_init(&dr->dr_dirty_node);
2331 list_link_init(&dr->dr_dbuf_node);
2332 dr->dr_dnode = dn;
2333 if (db->db_level == 0) {
2334 void *data_old = db->db_buf;
2335
2336 if (db->db_state != DB_NOFILL) {
2337 if (db->db_blkid == DMU_BONUS_BLKID) {
2338 dbuf_fix_old_data(db, tx->tx_txg);
2339 data_old = db->db.db_data;
2340 } else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
2341 /*
2342 * Release the data buffer from the cache so
2343 * that we can modify it without impacting
2344 * possible other users of this cached data
2345 * block. Note that indirect blocks and
2346 * private objects are not released until the
2347 * syncing state (since they are only modified
2348 * then).
2349 */
2350 arc_release(db->db_buf, db);
2351 dbuf_fix_old_data(db, tx->tx_txg);
2352 data_old = db->db_buf;
2353 }
2354 ASSERT(data_old != NULL);
2355 }
2356 dr->dt.dl.dr_data = data_old;
2357 } else {
2358 mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
2359 list_create(&dr->dt.di.dr_children,
2360 sizeof (dbuf_dirty_record_t),
2361 offsetof(dbuf_dirty_record_t, dr_dirty_node));
2362 }
2363 if (db->db_blkid != DMU_BONUS_BLKID && db->db_state != DB_NOFILL) {
2364 dr->dr_accounted = db->db.db_size;
2365 }
2366 dr->dr_dbuf = db;
2367 dr->dr_txg = tx->tx_txg;
2368 list_insert_before(&db->db_dirty_records, dr_next, dr);
2369
2370 /*
2371 * We could have been freed_in_flight between the dbuf_noread
2372 * and dbuf_dirty. We win, as though the dbuf_noread() had
2373 * happened after the free.
2374 */
2375 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
2376 db->db_blkid != DMU_SPILL_BLKID) {
2377 mutex_enter(&dn->dn_mtx);
2378 if (dn->dn_free_ranges[txgoff] != NULL) {
2379 range_tree_clear(dn->dn_free_ranges[txgoff],
2380 db->db_blkid, 1);
2381 }
2382 mutex_exit(&dn->dn_mtx);
2383 db->db_freed_in_flight = FALSE;
2384 }
2385
2386 /*
2387 * This buffer is now part of this txg
2388 */
2389 dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
2390 db->db_dirtycnt += 1;
2391 ASSERT3U(db->db_dirtycnt, <=, 3);
2392
2393 mutex_exit(&db->db_mtx);
2394
2395 if (db->db_blkid == DMU_BONUS_BLKID ||
2396 db->db_blkid == DMU_SPILL_BLKID) {
2397 mutex_enter(&dn->dn_mtx);
2398 ASSERT(!list_link_active(&dr->dr_dirty_node));
2399 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2400 mutex_exit(&dn->dn_mtx);
2401 dnode_setdirty(dn, tx);
2402 DB_DNODE_EXIT(db);
2403 return (dr);
2404 }
2405
2406 if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
2407 rw_enter(&dn->dn_struct_rwlock, RW_READER);
2408 drop_struct_rwlock = B_TRUE;
2409 }
2410
2411 /*
2412 * If we are overwriting a dedup BP, then unless it is snapshotted,
2413 * when we get to syncing context we will need to decrement its
2414 * refcount in the DDT. Prefetch the relevant DDT block so that
2415 * syncing context won't have to wait for the i/o.
2416 */
2417 if (db->db_blkptr != NULL) {
2418 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_READER, FTAG);
2419 ddt_prefetch(os->os_spa, db->db_blkptr);
2420 dmu_buf_unlock_parent(db, dblt, FTAG);
2421 }
2422
2423 /*
2424 * We need to hold the dn_struct_rwlock to make this assertion,
2425 * because it protects dn_phys / dn_next_nlevels from changing.
2426 */
2427 ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
2428 dn->dn_phys->dn_nlevels > db->db_level ||
2429 dn->dn_next_nlevels[txgoff] > db->db_level ||
2430 dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
2431 dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
2432
2433
2434 if (db->db_level == 0) {
2435 ASSERT(!db->db_objset->os_raw_receive ||
2436 dn->dn_maxblkid >= db->db_blkid);
2437 dnode_new_blkid(dn, db->db_blkid, tx,
2438 drop_struct_rwlock, B_FALSE);
2439 ASSERT(dn->dn_maxblkid >= db->db_blkid);
2440 }
2441
2442 if (db->db_level+1 < dn->dn_nlevels) {
2443 dmu_buf_impl_t *parent = db->db_parent;
2444 dbuf_dirty_record_t *di;
2445 int parent_held = FALSE;
2446
2447 if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
2448 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
2449 parent = dbuf_hold_level(dn, db->db_level + 1,
2450 db->db_blkid >> epbs, FTAG);
2451 ASSERT(parent != NULL);
2452 parent_held = TRUE;
2453 }
2454 if (drop_struct_rwlock)
2455 rw_exit(&dn->dn_struct_rwlock);
2456 ASSERT3U(db->db_level + 1, ==, parent->db_level);
2457 di = dbuf_dirty(parent, tx);
2458 if (parent_held)
2459 dbuf_rele(parent, FTAG);
2460
2461 mutex_enter(&db->db_mtx);
2462 /*
2463 * Since we've dropped the mutex, it's possible that
2464 * dbuf_undirty() might have changed this out from under us.
2465 */
2466 if (list_head(&db->db_dirty_records) == dr ||
2467 dn->dn_object == DMU_META_DNODE_OBJECT) {
2468 mutex_enter(&di->dt.di.dr_mtx);
2469 ASSERT3U(di->dr_txg, ==, tx->tx_txg);
2470 ASSERT(!list_link_active(&dr->dr_dirty_node));
2471 list_insert_tail(&di->dt.di.dr_children, dr);
2472 mutex_exit(&di->dt.di.dr_mtx);
2473 dr->dr_parent = di;
2474 }
2475 mutex_exit(&db->db_mtx);
2476 } else {
2477 ASSERT(db->db_level + 1 == dn->dn_nlevels);
2478 ASSERT(db->db_blkid < dn->dn_nblkptr);
2479 ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
2480 mutex_enter(&dn->dn_mtx);
2481 ASSERT(!list_link_active(&dr->dr_dirty_node));
2482 list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
2483 mutex_exit(&dn->dn_mtx);
2484 if (drop_struct_rwlock)
2485 rw_exit(&dn->dn_struct_rwlock);
2486 }
2487
2488 dnode_setdirty(dn, tx);
2489 DB_DNODE_EXIT(db);
2490 return (dr);
2491 }
2492
2493 static void
2494 dbuf_undirty_bonus(dbuf_dirty_record_t *dr)
2495 {
2496 dmu_buf_impl_t *db = dr->dr_dbuf;
2497
2498 if (dr->dt.dl.dr_data != db->db.db_data) {
2499 struct dnode *dn = dr->dr_dnode;
2500 int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
2501
2502 kmem_free(dr->dt.dl.dr_data, max_bonuslen);
2503 arc_space_return(max_bonuslen, ARC_SPACE_BONUS);
2504 }
2505 db->db_data_pending = NULL;
2506 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
2507 list_remove(&db->db_dirty_records, dr);
2508 if (dr->dr_dbuf->db_level != 0) {
2509 mutex_destroy(&dr->dt.di.dr_mtx);
2510 list_destroy(&dr->dt.di.dr_children);
2511 }
2512 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2513 ASSERT3U(db->db_dirtycnt, >, 0);
2514 db->db_dirtycnt -= 1;
2515 }
2516
2517 /*
2518 * Undirty a buffer in the transaction group referenced by the given
2519 * transaction. Return whether this evicted the dbuf.
2520 */
2521 boolean_t
2522 dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
2523 {
2524 uint64_t txg = tx->tx_txg;
2525 boolean_t brtwrite;
2526
2527 ASSERT(txg != 0);
2528
2529 /*
2530 * Due to our use of dn_nlevels below, this can only be called
2531 * in open context, unless we are operating on the MOS.
2532 * From syncing context, dn_nlevels may be different from the
2533 * dn_nlevels used when dbuf was dirtied.
2534 */
2535 ASSERT(db->db_objset ==
2536 dmu_objset_pool(db->db_objset)->dp_meta_objset ||
2537 txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
2538 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2539 ASSERT0(db->db_level);
2540 ASSERT(MUTEX_HELD(&db->db_mtx));
2541
2542 /*
2543 * If this buffer is not dirty, we're done.
2544 */
2545 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, txg);
2546 if (dr == NULL)
2547 return (B_FALSE);
2548 ASSERT(dr->dr_dbuf == db);
2549
2550 brtwrite = dr->dt.dl.dr_brtwrite;
2551 if (brtwrite) {
2552 /*
2553 * We are freeing a block that we cloned in the same
2554 * transaction group.
2555 */
2556 brt_pending_remove(dmu_objset_spa(db->db_objset),
2557 &dr->dt.dl.dr_overridden_by, tx);
2558 }
2559
2560 dnode_t *dn = dr->dr_dnode;
2561
2562 dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
2563
2564 ASSERT(db->db.db_size != 0);
2565
2566 dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
2567 dr->dr_accounted, txg);
2568
2569 list_remove(&db->db_dirty_records, dr);
2570
2571 /*
2572 * Note that there are three places in dbuf_dirty()
2573 * where this dirty record may be put on a list.
2574 * Make sure to do a list_remove corresponding to
2575 * every one of those list_insert calls.
2576 */
2577 if (dr->dr_parent) {
2578 mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
2579 list_remove(&dr->dr_parent->dt.di.dr_children, dr);
2580 mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
2581 } else if (db->db_blkid == DMU_SPILL_BLKID ||
2582 db->db_level + 1 == dn->dn_nlevels) {
2583 ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
2584 mutex_enter(&dn->dn_mtx);
2585 list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
2586 mutex_exit(&dn->dn_mtx);
2587 }
2588
2589 if (db->db_state != DB_NOFILL && !brtwrite) {
2590 dbuf_unoverride(dr);
2591
2592 ASSERT(db->db_buf != NULL);
2593 ASSERT(dr->dt.dl.dr_data != NULL);
2594 if (dr->dt.dl.dr_data != db->db_buf)
2595 arc_buf_destroy(dr->dt.dl.dr_data, db);
2596 }
2597
2598 kmem_free(dr, sizeof (dbuf_dirty_record_t));
2599
2600 ASSERT(db->db_dirtycnt > 0);
2601 db->db_dirtycnt -= 1;
2602
2603 if (zfs_refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
2604 ASSERT(db->db_state == DB_NOFILL || brtwrite ||
2605 arc_released(db->db_buf));
2606 dbuf_destroy(db);
2607 return (B_TRUE);
2608 }
2609
2610 return (B_FALSE);
2611 }
2612
2613 static void
2614 dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
2615 {
2616 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2617
2618 ASSERT(tx->tx_txg != 0);
2619 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2620
2621 /*
2622 * Quick check for dirtiness. For already dirty blocks, this
2623 * reduces the runtime of this function by >90% and improves overall
2624 * performance by ~50% for some workloads (e.g. file deletion with
2625 * indirect blocks cached).
2626 */
2627 mutex_enter(&db->db_mtx);
2628
2629 if (db->db_state == DB_CACHED) {
2630 dbuf_dirty_record_t *dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2631 /*
2632 * It's possible that it is already dirty but not cached,
2633 * because there are some calls to dbuf_dirty() that don't
2634 * go through dmu_buf_will_dirty().
2635 */
2636 if (dr != NULL) {
2637 /* This dbuf is already dirty and cached. */
2638 dbuf_redirty(dr);
2639 mutex_exit(&db->db_mtx);
2640 return;
2641 }
2642 }
2643 mutex_exit(&db->db_mtx);
2644
2645 DB_DNODE_ENTER(db);
2646 if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
2647 flags |= DB_RF_HAVESTRUCT;
2648 DB_DNODE_EXIT(db);
2649 (void) dbuf_read(db, NULL, flags);
2650 (void) dbuf_dirty(db, tx);
2651 }
2652
2653 void
2654 dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2655 {
2656 dmu_buf_will_dirty_impl(db_fake,
2657 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
2658 }
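
/*
 * Illustrative sketch of the consumer-level pattern, assuming db is a held
 * dmu_buf_t and tx an assigned transaction: declare the intent to modify,
 * then write into the cached data:
 *
 *	dmu_buf_will_dirty(db, tx);
 *	memcpy(db->db_data, src, db->db_size);
 */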
2659
2660 boolean_t
2661 dmu_buf_is_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
2662 {
2663 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2664 dbuf_dirty_record_t *dr;
2665
2666 mutex_enter(&db->db_mtx);
2667 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2668 mutex_exit(&db->db_mtx);
2669 return (dr != NULL);
2670 }
2671
2672 void
2673 dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2674 {
2675 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2676
2677 db->db_state = DB_NOFILL;
2678 DTRACE_SET_STATE(db, "allocating NOFILL buffer");
2679 dmu_buf_will_fill(db_fake, tx);
2680 }
2681
2682 void
2683 dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
2684 {
2685 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2686
2687 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2688 ASSERT(tx->tx_txg != 0);
2689 ASSERT(db->db_level == 0);
2690 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2691
2692 ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
2693 dmu_tx_private_ok(tx));
2694
2695 dbuf_noread(db);
2696 (void) dbuf_dirty(db, tx);
2697 }
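
/*
 * Illustrative sketch: a caller that overwrites the entire block can skip
 * the read by pairing dmu_buf_will_fill() with dmu_buf_fill_done() (defined
 * below) once the data has been copied in:
 *
 *	dmu_buf_will_fill(db, tx);
 *	memcpy(db->db_data, src, db->db_size);
 *	dmu_buf_fill_done(db, tx);
 */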
2698
2699 /*
2700 * This function is effectively the same as dmu_buf_will_dirty(), but
2701 * indicates the caller expects raw encrypted data in the db, and provides
2702 * the crypt params (byteorder, salt, iv, mac) which should be stored in the
2703 * blkptr_t when this dbuf is written. This is only used for blocks of
2704 * dnodes, during raw receive.
2705 */
2706 void
2707 dmu_buf_set_crypt_params(dmu_buf_t *db_fake, boolean_t byteorder,
2708 const uint8_t *salt, const uint8_t *iv, const uint8_t *mac, dmu_tx_t *tx)
2709 {
2710 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
2711 dbuf_dirty_record_t *dr;
2712
2713 /*
2714 * dr_has_raw_params is only processed for blocks of dnodes
2715 * (see dbuf_sync_dnode_leaf_crypt()).
2716 */
2717 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
2718 ASSERT3U(db->db_level, ==, 0);
2719 ASSERT(db->db_objset->os_raw_receive);
2720
2721 dmu_buf_will_dirty_impl(db_fake,
2722 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
2723
2724 dr = dbuf_find_dirty_eq(db, tx->tx_txg);
2725
2726 ASSERT3P(dr, !=, NULL);
2727
2728 dr->dt.dl.dr_has_raw_params = B_TRUE;
2729 dr->dt.dl.dr_byteorder = byteorder;
2730 memcpy(dr->dt.dl.dr_salt, salt, ZIO_DATA_SALT_LEN);
2731 memcpy(dr->dt.dl.dr_iv, iv, ZIO_DATA_IV_LEN);
2732 memcpy(dr->dt.dl.dr_mac, mac, ZIO_DATA_MAC_LEN);
2733 }
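
/*
 * Illustrative sketch, assuming a raw-receive caller that has decoded the
 * crypt params from a send-stream record; per the asserts above, the dbuf
 * must be a level-0 block of the meta-dnode object:
 *
 *	uint8_t salt[ZIO_DATA_SALT_LEN];
 *	uint8_t iv[ZIO_DATA_IV_LEN];
 *	uint8_t mac[ZIO_DATA_MAC_LEN];
 *	// ... fill byteorder/salt/iv/mac from the stream record ...
 *	dmu_buf_set_crypt_params(db_fake, byteorder, salt, iv, mac, tx);
 */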
2734
2735 static void
2736 dbuf_override_impl(dmu_buf_impl_t *db, const blkptr_t *bp, dmu_tx_t *tx)
2737 {
2738 struct dirty_leaf *dl;
2739 dbuf_dirty_record_t *dr;
2740
2741 dr = list_head(&db->db_dirty_records);
2742 ASSERT3P(dr, !=, NULL);
2743 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2744 dl = &dr->dt.dl;
2745 dl->dr_overridden_by = *bp;
2746 dl->dr_override_state = DR_OVERRIDDEN;
2747 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2748 }
2749
2750 void
2751 dmu_buf_fill_done(dmu_buf_t *dbuf, dmu_tx_t *tx)
2752 {
2753 (void) tx;
2754 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2755 dbuf_states_t old_state;
2756 mutex_enter(&db->db_mtx);
2757 DBUF_VERIFY(db);
2758
2759 old_state = db->db_state;
2760 db->db_state = DB_CACHED;
2761 if (old_state == DB_FILL) {
2762 if (db->db_level == 0 && db->db_freed_in_flight) {
2763 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2764 /* we were freed while filling */
2765 /* XXX dbuf_undirty? */
2766 memset(db->db.db_data, 0, db->db.db_size);
2767 db->db_freed_in_flight = FALSE;
2768 DTRACE_SET_STATE(db,
2769 "fill done handling freed in flight");
2770 } else {
2771 DTRACE_SET_STATE(db, "fill done");
2772 }
2773 cv_broadcast(&db->db_changed);
2774 }
2775 mutex_exit(&db->db_mtx);
2776 }
2777
2778 void
2779 dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
2780 bp_embedded_type_t etype, enum zio_compress comp,
2781 int uncompressed_size, int compressed_size, int byteorder,
2782 dmu_tx_t *tx)
2783 {
2784 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2785 struct dirty_leaf *dl;
2786 dmu_object_type_t type;
2787 dbuf_dirty_record_t *dr;
2788
2789 if (etype == BP_EMBEDDED_TYPE_DATA) {
2790 ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
2791 SPA_FEATURE_EMBEDDED_DATA));
2792 }
2793
2794 DB_DNODE_ENTER(db);
2795 type = DB_DNODE(db)->dn_type;
2796 DB_DNODE_EXIT(db);
2797
2798 ASSERT0(db->db_level);
2799 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2800
2801 dmu_buf_will_not_fill(dbuf, tx);
2802
2803 dr = list_head(&db->db_dirty_records);
2804 ASSERT3P(dr, !=, NULL);
2805 ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
2806 dl = &dr->dt.dl;
2807 encode_embedded_bp_compressed(&dl->dr_overridden_by,
2808 data, comp, uncompressed_size, compressed_size);
2809 BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
2810 BP_SET_TYPE(&dl->dr_overridden_by, type);
2811 BP_SET_LEVEL(&dl->dr_overridden_by, 0);
2812 BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
2813
2814 dl->dr_override_state = DR_OVERRIDDEN;
2815 dl->dr_overridden_by.blk_birth = dr->dr_txg;
2816 }
2817
2818 void
2819 dmu_buf_redact(dmu_buf_t *dbuf, dmu_tx_t *tx)
2820 {
2821 dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
2822 dmu_object_type_t type;
2823 ASSERT(dsl_dataset_feature_is_active(db->db_objset->os_dsl_dataset,
2824 SPA_FEATURE_REDACTED_DATASETS));
2825
2826 DB_DNODE_ENTER(db);
2827 type = DB_DNODE(db)->dn_type;
2828 DB_DNODE_EXIT(db);
2829
2830 ASSERT0(db->db_level);
2831 dmu_buf_will_not_fill(dbuf, tx);
2832
2833 blkptr_t bp = { { { {0} } } };
2834 BP_SET_TYPE(&bp, type);
2835 BP_SET_LEVEL(&bp, 0);
2836 BP_SET_BIRTH(&bp, tx->tx_txg, 0);
2837 BP_SET_REDACTED(&bp);
2838 BPE_SET_LSIZE(&bp, dbuf->db_size);
2839
2840 dbuf_override_impl(db, &bp, tx);
2841 }
2842
2843 /*
2844 * Directly assign a provided arc buf to a given dbuf if it's not referenced
2845 * by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
2846 */
2847 void
2848 dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
2849 {
2850 ASSERT(!zfs_refcount_is_zero(&db->db_holds));
2851 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
2852 ASSERT(db->db_level == 0);
2853 ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
2854 ASSERT(buf != NULL);
2855 ASSERT3U(arc_buf_lsize(buf), ==, db->db.db_size);
2856 ASSERT(tx->tx_txg != 0);
2857
2858 arc_return_buf(buf, db);
2859 ASSERT(arc_released(buf));
2860
2861 mutex_enter(&db->db_mtx);
2862
2863 while (db->db_state == DB_READ || db->db_state == DB_FILL)
2864 cv_wait(&db->db_changed, &db->db_mtx);
2865
2866 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
2867
2868 if (db->db_state == DB_CACHED &&
2869 zfs_refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
2870 /*
2871 * In practice, we will never have a case where we have an
2872 * encrypted arc buffer while additional holds exist on the
2873 * dbuf. We don't handle this here so we simply assert that
2874 * fact instead.
2875 */
2876 ASSERT(!arc_is_encrypted(buf));
2877 mutex_exit(&db->db_mtx);
2878 (void) dbuf_dirty(db, tx);
2879 memcpy(db->db.db_data, buf->b_data, db->db.db_size);
2880 arc_buf_destroy(buf, db);
2881 return;
2882 }
2883
2884 if (db->db_state == DB_CACHED) {
2885 dbuf_dirty_record_t *dr = list_head(&db->db_dirty_records);
2886
2887 ASSERT(db->db_buf != NULL);
2888 if (dr != NULL && dr->dr_txg == tx->tx_txg) {
2889 ASSERT(dr->dt.dl.dr_data == db->db_buf);
2890
2891 if (!arc_released(db->db_buf)) {
2892 ASSERT(dr->dt.dl.dr_override_state ==
2893 DR_OVERRIDDEN);
2894 arc_release(db->db_buf, db);
2895 }
2896 dr->dt.dl.dr_data = buf;
2897 arc_buf_destroy(db->db_buf, db);
2898 } else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
2899 arc_release(db->db_buf, db);
2900 arc_buf_destroy(db->db_buf, db);
2901 }
2902 db->db_buf = NULL;
2903 }
2904 ASSERT(db->db_buf == NULL);
2905 dbuf_set_data(db, buf);
2906 db->db_state = DB_FILL;
2907 DTRACE_SET_STATE(db, "filling assigned arcbuf");
2908 mutex_exit(&db->db_mtx);
2909 (void) dbuf_dirty(db, tx);
2910 dmu_buf_fill_done(&db->db, tx);
2911 }
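
/*
 * Illustrative sketch: the buffer passed in must already be released/loaned
 * and its logical size must match the dbuf (see the asserts at the top of
 * dbuf_assign_arcbuf()); arc_loan_buf() is assumed as the allocator here:
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, B_FALSE, db->db.db_size);
 *	memcpy(buf->b_data, src, db->db.db_size);
 *	dbuf_assign_arcbuf(db, buf, tx);	// buf is consumed (assigned or copied)
 */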
2912
2913 void
2914 dbuf_destroy(dmu_buf_impl_t *db)
2915 {
2916 dnode_t *dn;
2917 dmu_buf_impl_t *parent = db->db_parent;
2918 dmu_buf_impl_t *dndb;
2919
2920 ASSERT(MUTEX_HELD(&db->db_mtx));
2921 ASSERT(zfs_refcount_is_zero(&db->db_holds));
2922
2923 if (db->db_buf != NULL) {
2924 arc_buf_destroy(db->db_buf, db);
2925 db->db_buf = NULL;
2926 }
2927
2928 if (db->db_blkid == DMU_BONUS_BLKID) {
2929 int slots = DB_DNODE(db)->dn_num_slots;
2930 int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
2931 if (db->db.db_data != NULL) {
2932 kmem_free(db->db.db_data, bonuslen);
2933 arc_space_return(bonuslen, ARC_SPACE_BONUS);
2934 db->db_state = DB_UNCACHED;
2935 DTRACE_SET_STATE(db, "buffer cleared");
2936 }
2937 }
2938
2939 dbuf_clear_data(db);
2940
2941 if (multilist_link_active(&db->db_cache_link)) {
2942 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
2943 db->db_caching_status == DB_DBUF_METADATA_CACHE);
2944
2945 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
2946 (void) zfs_refcount_remove_many(
2947 &dbuf_caches[db->db_caching_status].size,
2948 db->db.db_size, db);
2949
2950 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
2951 DBUF_STAT_BUMPDOWN(metadata_cache_count);
2952 } else {
2953 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
2954 DBUF_STAT_BUMPDOWN(cache_count);
2955 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
2956 db->db.db_size);
2957 }
2958 db->db_caching_status = DB_NO_CACHE;
2959 }
2960
2961 ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
2962 ASSERT(db->db_data_pending == NULL);
2963 ASSERT(list_is_empty(&db->db_dirty_records));
2964
2965 db->db_state = DB_EVICTING;
2966 DTRACE_SET_STATE(db, "buffer eviction started");
2967 db->db_blkptr = NULL;
2968
2969 /*
2970 * Now that db_state is DB_EVICTING, nobody else can find this via
2971 * the hash table. We can now drop db_mtx, which allows us to
2972 * acquire the dn_dbufs_mtx.
2973 */
2974 mutex_exit(&db->db_mtx);
2975
2976 DB_DNODE_ENTER(db);
2977 dn = DB_DNODE(db);
2978 dndb = dn->dn_dbuf;
2979 if (db->db_blkid != DMU_BONUS_BLKID) {
2980 boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
2981 if (needlock)
2982 mutex_enter_nested(&dn->dn_dbufs_mtx,
2983 NESTED_SINGLE);
2984 avl_remove(&dn->dn_dbufs, db);
2985 membar_producer();
2986 DB_DNODE_EXIT(db);
2987 if (needlock)
2988 mutex_exit(&dn->dn_dbufs_mtx);
2989 /*
2990 * Decrementing the dbuf count means that the hold corresponding
2991 * to the removed dbuf is no longer discounted in dnode_move(),
2992 * so the dnode cannot be moved until after we release the hold.
2993 * The membar_producer() ensures visibility of the decremented
2994 * value in dnode_move(), since DB_DNODE_EXIT doesn't actually
2995 * release any lock.
2996 */
2997 mutex_enter(&dn->dn_mtx);
2998 dnode_rele_and_unlock(dn, db, B_TRUE);
2999 db->db_dnode_handle = NULL;
3000
3001 dbuf_hash_remove(db);
3002 } else {
3003 DB_DNODE_EXIT(db);
3004 }
3005
3006 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3007
3008 db->db_parent = NULL;
3009
3010 ASSERT(db->db_buf == NULL);
3011 ASSERT(db->db.db_data == NULL);
3012 ASSERT(db->db_hash_next == NULL);
3013 ASSERT(db->db_blkptr == NULL);
3014 ASSERT(db->db_data_pending == NULL);
3015 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3016 ASSERT(!multilist_link_active(&db->db_cache_link));
3017
3018 /*
3019 * If this dbuf is referenced from an indirect dbuf,
3020 * decrement the ref count on the indirect dbuf.
3021 */
3022 if (parent && parent != dndb) {
3023 mutex_enter(&parent->db_mtx);
3024 dbuf_rele_and_unlock(parent, db, B_TRUE);
3025 }
3026
3027 kmem_cache_free(dbuf_kmem_cache, db);
3028 arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3029 }
3030
3031 /*
3032 * Note: While bpp will always be updated if the function returns success,
3033 * parentp will not be updated if the dnode does not have dn_dbuf filled in;
3034 * this happens when the dnode is the meta-dnode, or {user|group|project}used
3035 * object.
3036 */
3037 __attribute__((always_inline))
3038 static inline int
3039 dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
3040 dmu_buf_impl_t **parentp, blkptr_t **bpp)
3041 {
3042 *parentp = NULL;
3043 *bpp = NULL;
3044
3045 ASSERT(blkid != DMU_BONUS_BLKID);
3046
3047 if (blkid == DMU_SPILL_BLKID) {
3048 mutex_enter(&dn->dn_mtx);
3049 if (dn->dn_have_spill &&
3050 (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
3051 *bpp = DN_SPILL_BLKPTR(dn->dn_phys);
3052 else
3053 *bpp = NULL;
3054 dbuf_add_ref(dn->dn_dbuf, NULL);
3055 *parentp = dn->dn_dbuf;
3056 mutex_exit(&dn->dn_mtx);
3057 return (0);
3058 }
3059
3060 int nlevels =
3061 (dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
3062 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
3063
3064 ASSERT3U(level * epbs, <, 64);
3065 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3066 /*
3067 * This assertion shouldn't trip as long as the max indirect block size
3068 * is less than 1M. The reason for this is that up to that point,
3069 * the number of levels required to address an entire object with blocks
3070 * of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
3071 * other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
3072 * (i.e. we can address the entire object), objects will all use at most
3073 * N-1 levels and the assertion won't overflow. However, once epbs is
3074 * 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
3075 * enough to address an entire object, so objects will have 5 levels,
3076 * but then this assertion will overflow.
3077 *
3078 * All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
3079 * need to redo this logic to handle overflows.
3080 */
3081 ASSERT(level >= nlevels ||
3082 ((nlevels - level - 1) * epbs) +
3083 highbit64(dn->dn_phys->dn_nblkptr) <= 64);
3084 if (level >= nlevels ||
3085 blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
3086 ((nlevels - level - 1) * epbs)) ||
3087 (fail_sparse &&
3088 blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
3089 /* the buffer has no parent yet */
3090 return (SET_ERROR(ENOENT));
3091 } else if (level < nlevels-1) {
3092 /* this block is referenced from an indirect block */
3093 int err;
3094
3095 err = dbuf_hold_impl(dn, level + 1,
3096 blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
3097
3098 if (err)
3099 return (err);
3100 err = dbuf_read(*parentp, NULL,
3101 (DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
3102 if (err) {
3103 dbuf_rele(*parentp, NULL);
3104 *parentp = NULL;
3105 return (err);
3106 }
3107 rw_enter(&(*parentp)->db_rwlock, RW_READER);
3108 *bpp = ((blkptr_t *)(*parentp)->db.db_data) +
3109 (blkid & ((1ULL << epbs) - 1));
3110 if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
3111 ASSERT(BP_IS_HOLE(*bpp));
3112 rw_exit(&(*parentp)->db_rwlock);
3113 return (0);
3114 } else {
3115 /* the block is referenced from the dnode */
3116 ASSERT3U(level, ==, nlevels-1);
3117 ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
3118 blkid < dn->dn_phys->dn_nblkptr);
3119 if (dn->dn_dbuf) {
3120 dbuf_add_ref(dn->dn_dbuf, NULL);
3121 *parentp = dn->dn_dbuf;
3122 }
3123 *bpp = &dn->dn_phys->dn_blkptr[blkid];
3124 return (0);
3125 }
3126 }
3127
3128 static dmu_buf_impl_t *
3129 dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
3130 dmu_buf_impl_t *parent, blkptr_t *blkptr, uint64_t hash)
3131 {
3132 objset_t *os = dn->dn_objset;
3133 dmu_buf_impl_t *db, *odb;
3134
3135 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3136 ASSERT(dn->dn_type != DMU_OT_NONE);
3137
3138 db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
3139
3140 list_create(&db->db_dirty_records, sizeof (dbuf_dirty_record_t),
3141 offsetof(dbuf_dirty_record_t, dr_dbuf_node));
3142
3143 db->db_objset = os;
3144 db->db.db_object = dn->dn_object;
3145 db->db_level = level;
3146 db->db_blkid = blkid;
3147 db->db_dirtycnt = 0;
3148 db->db_dnode_handle = dn->dn_handle;
3149 db->db_parent = parent;
3150 db->db_blkptr = blkptr;
3151 db->db_hash = hash;
3152
3153 db->db_user = NULL;
3154 db->db_user_immediate_evict = FALSE;
3155 db->db_freed_in_flight = FALSE;
3156 db->db_pending_evict = FALSE;
3157
3158 if (blkid == DMU_BONUS_BLKID) {
3159 ASSERT3P(parent, ==, dn->dn_dbuf);
3160 db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
3161 (dn->dn_nblkptr-1) * sizeof (blkptr_t);
3162 ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
3163 db->db.db_offset = DMU_BONUS_BLKID;
3164 db->db_state = DB_UNCACHED;
3165 DTRACE_SET_STATE(db, "bonus buffer created");
3166 db->db_caching_status = DB_NO_CACHE;
3167 /* the bonus dbuf is not placed in the hash table */
3168 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3169 return (db);
3170 } else if (blkid == DMU_SPILL_BLKID) {
3171 db->db.db_size = (blkptr != NULL) ?
3172 BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
3173 db->db.db_offset = 0;
3174 } else {
3175 int blocksize =
3176 db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
3177 db->db.db_size = blocksize;
3178 db->db.db_offset = db->db_blkid * blocksize;
3179 }
3180
3181 /*
3182 * Hold the dn_dbufs_mtx while we get the new dbuf
3183 * in the hash table *and* added to the dbufs list.
3184 * This prevents a possible deadlock with someone
3185 * trying to look up this dbuf before it's added to the
3186 * dn_dbufs list.
3187 */
3188 mutex_enter(&dn->dn_dbufs_mtx);
3189 db->db_state = DB_EVICTING; /* not worth logging this state change */
3190 if ((odb = dbuf_hash_insert(db)) != NULL) {
3191 /* someone else inserted it first */
3192 mutex_exit(&dn->dn_dbufs_mtx);
3193 kmem_cache_free(dbuf_kmem_cache, db);
3194 DBUF_STAT_BUMP(hash_insert_race);
3195 return (odb);
3196 }
3197 avl_add(&dn->dn_dbufs, db);
3198
3199 db->db_state = DB_UNCACHED;
3200 DTRACE_SET_STATE(db, "regular buffer created");
3201 db->db_caching_status = DB_NO_CACHE;
3202 mutex_exit(&dn->dn_dbufs_mtx);
3203 arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
3204
3205 if (parent && parent != dn->dn_dbuf)
3206 dbuf_add_ref(parent, db);
3207
3208 ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
3209 zfs_refcount_count(&dn->dn_holds) > 0);
3210 (void) zfs_refcount_add(&dn->dn_holds, db);
3211
3212 dprintf_dbuf(db, "db=%p\n", db);
3213
3214 return (db);
3215 }
3216
3217 /*
3218 * This function returns a block pointer and information about the object,
3219 * given a dnode and a block. This is a publicly accessible version of
3220 * dbuf_findbp that only returns some information, rather than the
3221 * dbuf. Note that the dnode passed in must be held, and the dn_struct_rwlock
3222 * should be locked as (at least) a reader.
3223 */
3224 int
3225 dbuf_dnode_findbp(dnode_t *dn, uint64_t level, uint64_t blkid,
3226 blkptr_t *bp, uint16_t *datablkszsec, uint8_t *indblkshift)
3227 {
3228 dmu_buf_impl_t *dbp = NULL;
3229 blkptr_t *bp2;
3230 int err = 0;
3231 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3232
3233 err = dbuf_findbp(dn, level, blkid, B_FALSE, &dbp, &bp2);
3234 if (err == 0) {
3235 ASSERT3P(bp2, !=, NULL);
3236 *bp = *bp2;
3237 if (dbp != NULL)
3238 dbuf_rele(dbp, NULL);
3239 if (datablkszsec != NULL)
3240 *datablkszsec = dn->dn_phys->dn_datablkszsec;
3241 if (indblkshift != NULL)
3242 *indblkshift = dn->dn_phys->dn_indblkshift;
3243 }
3244
3245 return (err);
3246 }
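
/*
 * Illustrative sketch: per the comment above, the dnode must be held and
 * dn_struct_rwlock held at least as reader around the call:
 *
 *	blkptr_t bp;
 *	uint16_t datablkszsec;
 *	uint8_t indblkshift;
 *
 *	rw_enter(&dn->dn_struct_rwlock, RW_READER);
 *	int err = dbuf_dnode_findbp(dn, 0, blkid, &bp, &datablkszsec,
 *	    &indblkshift);
 *	rw_exit(&dn->dn_struct_rwlock);
 */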
3247
3248 typedef struct dbuf_prefetch_arg {
3249 spa_t *dpa_spa; /* The spa to issue the prefetch in. */
3250 zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
3251 int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
3252 int dpa_curlevel; /* The current level that we're reading */
3253 dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
3254 zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
3255 zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
3256 arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
3257 dbuf_prefetch_fn dpa_cb; /* prefetch completion callback */
3258 void *dpa_arg; /* prefetch completion arg */
3259 } dbuf_prefetch_arg_t;
3260
3261 static void
3262 dbuf_prefetch_fini(dbuf_prefetch_arg_t *dpa, boolean_t io_done)
3263 {
3264 if (dpa->dpa_cb != NULL) {
3265 dpa->dpa_cb(dpa->dpa_arg, dpa->dpa_zb.zb_level,
3266 dpa->dpa_zb.zb_blkid, io_done);
3267 }
3268 kmem_free(dpa, sizeof (*dpa));
3269 }
3270
3271 static void
3272 dbuf_issue_final_prefetch_done(zio_t *zio, const zbookmark_phys_t *zb,
3273 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3274 {
3275 (void) zio, (void) zb, (void) iobp;
3276 dbuf_prefetch_arg_t *dpa = private;
3277
3278 if (abuf != NULL)
3279 arc_buf_destroy(abuf, private);
3280
3281 dbuf_prefetch_fini(dpa, B_TRUE);
3282 }
3283
3284 /*
3285 * Actually issue the prefetch read for the block given.
3286 */
3287 static void
3288 dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
3289 {
3290 ASSERT(!BP_IS_REDACTED(bp) ||
3291 dsl_dataset_feature_is_active(
3292 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3293 SPA_FEATURE_REDACTED_DATASETS));
3294
3295 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
3296 return (dbuf_prefetch_fini(dpa, B_FALSE));
3297
3298 int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
3299 arc_flags_t aflags =
3300 dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH |
3301 ARC_FLAG_NO_BUF;
3302
3303 /* dnodes are always read as raw and then converted later */
3304 if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
3305 dpa->dpa_curlevel == 0)
3306 zio_flags |= ZIO_FLAG_RAW;
3307
3308 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3309 ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
3310 ASSERT(dpa->dpa_zio != NULL);
3311 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp,
3312 dbuf_issue_final_prefetch_done, dpa,
3313 dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
3314 }
3315
3316 /*
3317 * Called when an indirect block above our prefetch target is read in. This
3318 * will either read in the next indirect block down the tree or issue the actual
3319 * prefetch if the next block down is our target.
3320 */
3321 static void
3322 dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
3323 const blkptr_t *iobp, arc_buf_t *abuf, void *private)
3324 {
3325 (void) zb, (void) iobp;
3326 dbuf_prefetch_arg_t *dpa = private;
3327
3328 ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
3329 ASSERT3S(dpa->dpa_curlevel, >, 0);
3330
3331 if (abuf == NULL) {
3332 ASSERT(zio == NULL || zio->io_error != 0);
3333 dbuf_prefetch_fini(dpa, B_TRUE);
3334 return;
3335 }
3336 ASSERT(zio == NULL || zio->io_error == 0);
3337
3338 /*
3339 * The dpa_dnode is only valid if we are called with a NULL
3340 * zio. This indicates that the arc_read() returned without
3341 * first calling zio_read() to issue a physical read. Once
3342 * a physical read is made the dpa_dnode must be invalidated
3343 * as the locks guarding it may have been dropped. If the
3344 * dpa_dnode is still valid, then we want to add it to the dbuf
3345 * cache. To do so, we must hold the dbuf associated with the block
3346 * we just prefetched, read its contents so that we associate it
3347 * with an arc_buf_t, and then release it.
3348 */
3349 if (zio != NULL) {
3350 ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
3351 if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
3352 ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
3353 } else {
3354 ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
3355 }
3356 ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
3357
3358 dpa->dpa_dnode = NULL;
3359 } else if (dpa->dpa_dnode != NULL) {
3360 uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
3361 (dpa->dpa_epbs * (dpa->dpa_curlevel -
3362 dpa->dpa_zb.zb_level));
3363 dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
3364 dpa->dpa_curlevel, curblkid, FTAG);
3365 if (db == NULL) {
3366 arc_buf_destroy(abuf, private);
3367 dbuf_prefetch_fini(dpa, B_TRUE);
3368 return;
3369 }
3370 (void) dbuf_read(db, NULL,
3371 DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
3372 dbuf_rele(db, FTAG);
3373 }
3374
3375 dpa->dpa_curlevel--;
3376 uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
3377 (dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
3378 blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
3379 P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
3380
3381 ASSERT(!BP_IS_REDACTED(bp) || (dpa->dpa_dnode &&
3382 dsl_dataset_feature_is_active(
3383 dpa->dpa_dnode->dn_objset->os_dsl_dataset,
3384 SPA_FEATURE_REDACTED_DATASETS)));
3385 if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp)) {
3386 arc_buf_destroy(abuf, private);
3387 dbuf_prefetch_fini(dpa, B_TRUE);
3388 return;
3389 } else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
3390 ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
3391 dbuf_issue_final_prefetch(dpa, bp);
3392 } else {
3393 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3394 zbookmark_phys_t zb;
3395
3396 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3397 if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
3398 iter_aflags |= ARC_FLAG_L2CACHE;
3399
3400 ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
3401
3402 SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
3403 dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
3404
3405 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3406 bp, dbuf_prefetch_indirect_done, dpa,
3407 ZIO_PRIORITY_SYNC_READ,
3408 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3409 &iter_aflags, &zb);
3410 }
3411
3412 arc_buf_destroy(abuf, private);
3413 }
3414
3415 /*
3416 * Issue prefetch reads for the given block on the given level. If the indirect
3417 * blocks above that block are not in memory, we will read them in
3418 * asynchronously. As a result, this call never blocks waiting for a read to
3419 * complete. Note that the prefetch might fail if the dataset is encrypted and
3420 * the encryption key is unmapped before the IO completes.
3421 */
3422 int
3423 dbuf_prefetch_impl(dnode_t *dn, int64_t level, uint64_t blkid,
3424 zio_priority_t prio, arc_flags_t aflags, dbuf_prefetch_fn cb,
3425 void *arg)
3426 {
3427 blkptr_t bp;
3428 int epbs, nlevels, curlevel;
3429 uint64_t curblkid;
3430
3431 ASSERT(blkid != DMU_BONUS_BLKID);
3432 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3433
3434 if (blkid > dn->dn_maxblkid)
3435 goto no_issue;
3436
3437 if (level == 0 && dnode_block_freed(dn, blkid))
3438 goto no_issue;
3439
3440 /*
3441 * This dnode hasn't been written to disk yet, so there's nothing to
3442 * prefetch.
3443 */
3444 nlevels = dn->dn_phys->dn_nlevels;
3445 if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
3446 goto no_issue;
3447
3448 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
3449 if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
3450 goto no_issue;
3451
3452 dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
3453 level, blkid, NULL);
3454 if (db != NULL) {
3455 mutex_exit(&db->db_mtx);
3456 /*
3457 * This dbuf already exists. It is either CACHED, or
3458 * (we assume) about to be read or filled.
3459 */
3460 goto no_issue;
3461 }
3462
3463 /*
3464 * Find the closest ancestor (indirect block) of the target block
3465 * that is present in the cache. In this indirect block, we will
3466 * find the bp that is at curlevel, curblkid.
3467 */
3468 curlevel = level;
3469 curblkid = blkid;
3470 while (curlevel < nlevels - 1) {
3471 int parent_level = curlevel + 1;
3472 uint64_t parent_blkid = curblkid >> epbs;
3473 dmu_buf_impl_t *db;
3474
3475 if (dbuf_hold_impl(dn, parent_level, parent_blkid,
3476 FALSE, TRUE, FTAG, &db) == 0) {
3477 blkptr_t *bpp = db->db_buf->b_data;
3478 bp = bpp[P2PHASE(curblkid, 1 << epbs)];
3479 dbuf_rele(db, FTAG);
3480 break;
3481 }
3482
3483 curlevel = parent_level;
3484 curblkid = parent_blkid;
3485 }
3486
3487 if (curlevel == nlevels - 1) {
3488 /* No cached indirect blocks found. */
3489 ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
3490 bp = dn->dn_phys->dn_blkptr[curblkid];
3491 }
3492 ASSERT(!BP_IS_REDACTED(&bp) ||
3493 dsl_dataset_feature_is_active(dn->dn_objset->os_dsl_dataset,
3494 SPA_FEATURE_REDACTED_DATASETS));
3495 if (BP_IS_HOLE(&bp) || BP_IS_REDACTED(&bp))
3496 goto no_issue;
3497
3498 ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
3499
3500 zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
3501 ZIO_FLAG_CANFAIL);
3502
3503 dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
3504 dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
3505 SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3506 dn->dn_object, level, blkid);
3507 dpa->dpa_curlevel = curlevel;
3508 dpa->dpa_prio = prio;
3509 dpa->dpa_aflags = aflags;
3510 dpa->dpa_spa = dn->dn_objset->os_spa;
3511 dpa->dpa_dnode = dn;
3512 dpa->dpa_epbs = epbs;
3513 dpa->dpa_zio = pio;
3514 dpa->dpa_cb = cb;
3515 dpa->dpa_arg = arg;
3516
3517 if (!DNODE_LEVEL_IS_CACHEABLE(dn, level))
3518 dpa->dpa_aflags |= ARC_FLAG_UNCACHED;
3519 else if (dnode_level_is_l2cacheable(&bp, dn, level))
3520 dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
3521
3522 /*
3523 * If we have the indirect just above us, no need to do the asynchronous
3524 * prefetch chain; we'll just run the last step ourselves. If we're at
3525 * a higher level, though, we want to issue the prefetches for all the
3526 * indirect blocks asynchronously, so we can go on with whatever we were
3527 * doing.
3528 */
3529 if (curlevel == level) {
3530 ASSERT3U(curblkid, ==, blkid);
3531 dbuf_issue_final_prefetch(dpa, &bp);
3532 } else {
3533 arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
3534 zbookmark_phys_t zb;
3535
3536 /* flag if L2ARC eligible, l2arc_noprefetch then decides */
3537 if (dnode_level_is_l2cacheable(&bp, dn, level))
3538 iter_aflags |= ARC_FLAG_L2CACHE;
3539
3540 SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
3541 dn->dn_object, curlevel, curblkid);
3542 (void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
3543 &bp, dbuf_prefetch_indirect_done, dpa,
3544 ZIO_PRIORITY_SYNC_READ,
3545 ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
3546 &iter_aflags, &zb);
3547 }
3548 /*
3549 * We use pio here instead of dpa_zio since it's possible that
3550 * dpa may have already been freed.
3551 */
3552 zio_nowait(pio);
3553 return (1);
3554 no_issue:
3555 if (cb != NULL)
3556 cb(arg, level, blkid, B_FALSE);
3557 return (0);
3558 }
3559
3560 int
3561 dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
3562 arc_flags_t aflags)
3563 {
3564
3565 return (dbuf_prefetch_impl(dn, level, blkid, prio, aflags, NULL, NULL));
3566 }
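/*
 * Editorial sketch (not part of the original source): one way a caller
 * might warm the next few level-0 blocks of a dnode using dbuf_prefetch()
 * above. The helper name and the fixed count of four are illustrative;
 * real callers (e.g. dmu_prefetch()) also clamp against dn_maxblkid and
 * choose priorities and ARC flags per use case.
 */
#if 0
static void
example_prefetch_ahead(dnode_t *dn, uint64_t start_blkid)
{
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	for (uint64_t blkid = start_blkid; blkid < start_blkid + 4; blkid++) {
		/* Returns 1 if a prefetch was issued, 0 if there was nothing to do. */
		(void) dbuf_prefetch(dn, 0, blkid,
		    ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREFETCH);
	}
	rw_exit(&dn->dn_struct_rwlock);
}
#endif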
3567
3568 /*
3569 * Helper function for dbuf_hold_impl() to copy a buffer. Handles
3570 * the case of encrypted, compressed and uncompressed buffers by
3571 * allocating the new buffer, respectively, with arc_alloc_raw_buf(),
3572 * arc_alloc_compressed_buf() or arc_alloc_buf().
3573 *
3574 * NOTE: Declared noinline to avoid stack bloat in dbuf_hold_impl().
3575 */
3576 noinline static void
3577 dbuf_hold_copy(dnode_t *dn, dmu_buf_impl_t *db)
3578 {
3579 dbuf_dirty_record_t *dr = db->db_data_pending;
3580 arc_buf_t *data = dr->dt.dl.dr_data;
3581 enum zio_compress compress_type = arc_get_compression(data);
3582 uint8_t complevel = arc_get_complevel(data);
3583
3584 if (arc_is_encrypted(data)) {
3585 boolean_t byteorder;
3586 uint8_t salt[ZIO_DATA_SALT_LEN];
3587 uint8_t iv[ZIO_DATA_IV_LEN];
3588 uint8_t mac[ZIO_DATA_MAC_LEN];
3589
3590 arc_get_raw_params(data, &byteorder, salt, iv, mac);
3591 dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
3592 dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
3593 dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
3594 compress_type, complevel));
3595 } else if (compress_type != ZIO_COMPRESS_OFF) {
3596 dbuf_set_data(db, arc_alloc_compressed_buf(
3597 dn->dn_objset->os_spa, db, arc_buf_size(data),
3598 arc_buf_lsize(data), compress_type, complevel));
3599 } else {
3600 dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
3601 DBUF_GET_BUFC_TYPE(db), db->db.db_size));
3602 }
3603
3604 rw_enter(&db->db_rwlock, RW_WRITER);
3605 memcpy(db->db.db_data, data->b_data, arc_buf_size(data));
3606 rw_exit(&db->db_rwlock);
3607 }
3608
3609 /*
3610 * Returns with db_holds incremented, and db_mtx not held.
3611 * Note: dn_struct_rwlock must be held.
3612 */
3613 int
3614 dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
3615 boolean_t fail_sparse, boolean_t fail_uncached,
3616 const void *tag, dmu_buf_impl_t **dbp)
3617 {
3618 dmu_buf_impl_t *db, *parent = NULL;
3619 uint64_t hv;
3620
3621 /* If the pool has been created, verify the tx_sync_lock is not held */
3622 spa_t *spa = dn->dn_objset->os_spa;
3623 dsl_pool_t *dp = spa->spa_dsl_pool;
3624 if (dp != NULL) {
3625 ASSERT(!MUTEX_HELD(&dp->dp_tx.tx_sync_lock));
3626 }
3627
3628 ASSERT(blkid != DMU_BONUS_BLKID);
3629 ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
3630 ASSERT3U(dn->dn_nlevels, >, level);
3631
3632 *dbp = NULL;
3633
3634 /* dbuf_find() returns with db_mtx held */
3635 db = dbuf_find(dn->dn_objset, dn->dn_object, level, blkid, &hv);
3636
3637 if (db == NULL) {
3638 blkptr_t *bp = NULL;
3639 int err;
3640
3641 if (fail_uncached)
3642 return (SET_ERROR(ENOENT));
3643
3644 ASSERT3P(parent, ==, NULL);
3645 err = dbuf_findbp(dn, level, blkid, fail_sparse, &parent, &bp);
3646 if (fail_sparse) {
3647 if (err == 0 && bp && BP_IS_HOLE(bp))
3648 err = SET_ERROR(ENOENT);
3649 if (err) {
3650 if (parent)
3651 dbuf_rele(parent, NULL);
3652 return (err);
3653 }
3654 }
3655 if (err && err != ENOENT)
3656 return (err);
3657 db = dbuf_create(dn, level, blkid, parent, bp, hv);
3658 }
3659
3660 if (fail_uncached && db->db_state != DB_CACHED) {
3661 mutex_exit(&db->db_mtx);
3662 return (SET_ERROR(ENOENT));
3663 }
3664
3665 if (db->db_buf != NULL) {
3666 arc_buf_access(db->db_buf);
3667 ASSERT3P(db->db.db_data, ==, db->db_buf->b_data);
3668 }
3669
3670 ASSERT(db->db_buf == NULL || arc_referenced(db->db_buf));
3671
3672 /*
3673 * If this buffer is currently syncing out, and we are
3674 * still referencing it from db_data, we need to make a copy
3675 * of it in case we decide we want to dirty it again in this txg.
3676 */
3677 if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
3678 dn->dn_object != DMU_META_DNODE_OBJECT &&
3679 db->db_state == DB_CACHED && db->db_data_pending) {
3680 dbuf_dirty_record_t *dr = db->db_data_pending;
3681 if (dr->dt.dl.dr_data == db->db_buf) {
3682 ASSERT3P(db->db_buf, !=, NULL);
3683 dbuf_hold_copy(dn, db);
3684 }
3685 }
3686
3687 if (multilist_link_active(&db->db_cache_link)) {
3688 ASSERT(zfs_refcount_is_zero(&db->db_holds));
3689 ASSERT(db->db_caching_status == DB_DBUF_CACHE ||
3690 db->db_caching_status == DB_DBUF_METADATA_CACHE);
3691
3692 multilist_remove(&dbuf_caches[db->db_caching_status].cache, db);
3693 (void) zfs_refcount_remove_many(
3694 &dbuf_caches[db->db_caching_status].size,
3695 db->db.db_size, db);
3696
3697 if (db->db_caching_status == DB_DBUF_METADATA_CACHE) {
3698 DBUF_STAT_BUMPDOWN(metadata_cache_count);
3699 } else {
3700 DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
3701 DBUF_STAT_BUMPDOWN(cache_count);
3702 DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
3703 db->db.db_size);
3704 }
3705 db->db_caching_status = DB_NO_CACHE;
3706 }
3707 (void) zfs_refcount_add(&db->db_holds, tag);
3708 DBUF_VERIFY(db);
3709 mutex_exit(&db->db_mtx);
3710
3711 /* NOTE: we can't rele the parent until after we drop the db_mtx */
3712 if (parent)
3713 dbuf_rele(parent, NULL);
3714
3715 ASSERT3P(DB_DNODE(db), ==, dn);
3716 ASSERT3U(db->db_blkid, ==, blkid);
3717 ASSERT3U(db->db_level, ==, level);
3718 *dbp = db;
3719
3720 return (0);
3721 }
3722
3723 dmu_buf_impl_t *
3724 dbuf_hold(dnode_t *dn, uint64_t blkid, const void *tag)
3725 {
3726 return (dbuf_hold_level(dn, 0, blkid, tag));
3727 }
3728
3729 dmu_buf_impl_t *
3730 dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, const void *tag)
3731 {
3732 dmu_buf_impl_t *db;
3733 int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
3734 return (err ? NULL : db);
3735 }
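/*
 * Editorial sketch (not part of the original source): the usual
 * hold/read/release pattern built from the functions above, similar to
 * the prefetch completion path earlier in this file. The caller must hold
 * dn_struct_rwlock; error handling is abbreviated and the helper name is
 * made up.
 */
#if 0
static int
example_read_block(dnode_t *dn, uint64_t blkid)
{
	dmu_buf_impl_t *db;
	int err;

	ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));

	err = dbuf_hold_impl(dn, 0, blkid, FALSE, FALSE, FTAG, &db);
	if (err != 0)
		return (err);
	err = dbuf_read(db, NULL, DB_RF_MUST_SUCCEED | DB_RF_HAVESTRUCT);
	/* ... use db->db.db_data under the appropriate locks ... */
	dbuf_rele(db, FTAG);
	return (err);
}
#endif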
3736
3737 void
3738 dbuf_create_bonus(dnode_t *dn)
3739 {
3740 ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
3741
3742 ASSERT(dn->dn_bonus == NULL);
3743 dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL,
3744 dbuf_hash(dn->dn_objset, dn->dn_object, 0, DMU_BONUS_BLKID));
3745 }
3746
3747 int
3748 dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
3749 {
3750 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3751
3752 if (db->db_blkid != DMU_SPILL_BLKID)
3753 return (SET_ERROR(ENOTSUP));
3754 if (blksz == 0)
3755 blksz = SPA_MINBLOCKSIZE;
3756 ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
3757 blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
3758
3759 dbuf_new_size(db, blksz, tx);
3760
3761 return (0);
3762 }
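/*
 * Editorial example (not part of the original source): what the
 * P2ROUNDUP() above does to a requested spill block size, assuming
 * SPA_MINBLOCKSIZE is 512 and the sysmacros definition
 * P2ROUNDUP(x, align) == (-(-(x) & -(align))) for power-of-two alignments:
 *
 *	P2ROUNDUP(1000, 512) == 1024
 *	P2ROUNDUP(512, 512)  == 512
 *	P2ROUNDUP(513, 512)  == 1024
 *
 * A request of 0 is first bumped to SPA_MINBLOCKSIZE before rounding.
 */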
3763
3764 void
3765 dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
3766 {
3767 dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
3768 }
3769
3770 #pragma weak dmu_buf_add_ref = dbuf_add_ref
3771 void
3772 dbuf_add_ref(dmu_buf_impl_t *db, const void *tag)
3773 {
3774 int64_t holds = zfs_refcount_add(&db->db_holds, tag);
3775 VERIFY3S(holds, >, 1);
3776 }
3777
3778 #pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
3779 boolean_t
3780 dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
3781 const void *tag)
3782 {
3783 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3784 dmu_buf_impl_t *found_db;
3785 boolean_t result = B_FALSE;
3786
3787 if (blkid == DMU_BONUS_BLKID)
3788 found_db = dbuf_find_bonus(os, obj);
3789 else
3790 found_db = dbuf_find(os, obj, 0, blkid, NULL);
3791
3792 if (found_db != NULL) {
3793 if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
3794 (void) zfs_refcount_add(&db->db_holds, tag);
3795 result = B_TRUE;
3796 }
3797 mutex_exit(&found_db->db_mtx);
3798 }
3799 return (result);
3800 }
3801
3802 /*
3803 * If you call dbuf_rele() you had better not be referencing the dnode handle
3804 * unless you have some other direct or indirect hold on the dnode. (An indirect
3805 * hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
3806 * Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
3807 * dnode's parent dbuf evicting its dnode handles.
3808 */
3809 void
3810 dbuf_rele(dmu_buf_impl_t *db, const void *tag)
3811 {
3812 mutex_enter(&db->db_mtx);
3813 dbuf_rele_and_unlock(db, tag, B_FALSE);
3814 }
3815
3816 void
3817 dmu_buf_rele(dmu_buf_t *db, const void *tag)
3818 {
3819 dbuf_rele((dmu_buf_impl_t *)db, tag);
3820 }
3821
3822 /*
3823 * dbuf_rele() for an already-locked dbuf. This is necessary to allow
3824 * db_dirtycnt and db_holds to be updated atomically. The 'evicting'
3825 * argument should be set if we are already in the dbuf-evicting code
3826 * path, in which case we don't want to recursively evict. This allows us to
3827 * avoid deeply nested stacks that would have a call flow similar to this:
3828 *
3829 * dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
3830 * ^ |
3831 * | |
3832 * +-----dbuf_destroy()<--dbuf_evict_one()<--------+
3833 *
3834 */
3835 void
3836 dbuf_rele_and_unlock(dmu_buf_impl_t *db, const void *tag, boolean_t evicting)
3837 {
3838 int64_t holds;
3839 uint64_t size;
3840
3841 ASSERT(MUTEX_HELD(&db->db_mtx));
3842 DBUF_VERIFY(db);
3843
3844 /*
3845 * Remove the reference to the dbuf before removing its hold on the
3846 * dnode so we can guarantee in dnode_move() that a referenced bonus
3847 * buffer has a corresponding dnode hold.
3848 */
3849 holds = zfs_refcount_remove(&db->db_holds, tag);
3850 ASSERT(holds >= 0);
3851
3852 /*
3853 * We can't freeze indirects if there is a possibility that they
3854 * may be modified in the current syncing context.
3855 */
3856 if (db->db_buf != NULL &&
3857 holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
3858 arc_buf_freeze(db->db_buf);
3859 }
3860
3861 if (holds == db->db_dirtycnt &&
3862 db->db_level == 0 && db->db_user_immediate_evict)
3863 dbuf_evict_user(db);
3864
3865 if (holds == 0) {
3866 if (db->db_blkid == DMU_BONUS_BLKID) {
3867 dnode_t *dn;
3868 boolean_t evict_dbuf = db->db_pending_evict;
3869
3870 /*
3871 * If the dnode moves here, we cannot cross this
3872 * barrier until the move completes.
3873 */
3874 DB_DNODE_ENTER(db);
3875
3876 dn = DB_DNODE(db);
3877 atomic_dec_32(&dn->dn_dbufs_count);
3878
3879 /*
3880 * Decrementing the dbuf count means that the bonus
3881 * buffer's dnode hold is no longer discounted in
3882 * dnode_move(). The dnode cannot move until after
3883 * the dnode_rele() below.
3884 */
3885 DB_DNODE_EXIT(db);
3886
3887 /*
3888 * Do not reference db after its lock is dropped.
3889 * Another thread may evict it.
3890 */
3891 mutex_exit(&db->db_mtx);
3892
3893 if (evict_dbuf)
3894 dnode_evict_bonus(dn);
3895
3896 dnode_rele(dn, db);
3897 } else if (db->db_buf == NULL) {
3898 /*
3899 * This is a special case: we never associated this
3900 * dbuf with any data allocated from the ARC.
3901 */
3902 ASSERT(db->db_state == DB_UNCACHED ||
3903 db->db_state == DB_NOFILL);
3904 dbuf_destroy(db);
3905 } else if (arc_released(db->db_buf)) {
3906 /*
3907 * This dbuf has anonymous data associated with it.
3908 */
3909 dbuf_destroy(db);
3910 } else if (!(DBUF_IS_CACHEABLE(db) || db->db_partial_read) ||
3911 db->db_pending_evict) {
3912 dbuf_destroy(db);
3913 } else if (!multilist_link_active(&db->db_cache_link)) {
3914 ASSERT3U(db->db_caching_status, ==, DB_NO_CACHE);
3915
3916 dbuf_cached_state_t dcs =
3917 dbuf_include_in_metadata_cache(db) ?
3918 DB_DBUF_METADATA_CACHE : DB_DBUF_CACHE;
3919 db->db_caching_status = dcs;
3920
3921 multilist_insert(&dbuf_caches[dcs].cache, db);
3922 uint64_t db_size = db->db.db_size;
3923 size = zfs_refcount_add_many(
3924 &dbuf_caches[dcs].size, db_size, db);
3925 uint8_t db_level = db->db_level;
3926 mutex_exit(&db->db_mtx);
3927
3928 if (dcs == DB_DBUF_METADATA_CACHE) {
3929 DBUF_STAT_BUMP(metadata_cache_count);
3930 DBUF_STAT_MAX(metadata_cache_size_bytes_max,
3931 size);
3932 } else {
3933 DBUF_STAT_BUMP(cache_count);
3934 DBUF_STAT_MAX(cache_size_bytes_max, size);
3935 DBUF_STAT_BUMP(cache_levels[db_level]);
3936 DBUF_STAT_INCR(cache_levels_bytes[db_level],
3937 db_size);
3938 }
3939
3940 if (dcs == DB_DBUF_CACHE && !evicting)
3941 dbuf_evict_notify(size);
3942 }
3943 } else {
3944 mutex_exit(&db->db_mtx);
3945 }
3946
3947 }
3948
3949 #pragma weak dmu_buf_refcount = dbuf_refcount
3950 uint64_t
3951 dbuf_refcount(dmu_buf_impl_t *db)
3952 {
3953 return (zfs_refcount_count(&db->db_holds));
3954 }
3955
3956 uint64_t
3957 dmu_buf_user_refcount(dmu_buf_t *db_fake)
3958 {
3959 uint64_t holds;
3960 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3961
3962 mutex_enter(&db->db_mtx);
3963 ASSERT3U(zfs_refcount_count(&db->db_holds), >=, db->db_dirtycnt);
3964 holds = zfs_refcount_count(&db->db_holds) - db->db_dirtycnt;
3965 mutex_exit(&db->db_mtx);
3966
3967 return (holds);
3968 }
3969
3970 void *
3971 dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
3972 dmu_buf_user_t *new_user)
3973 {
3974 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3975
3976 mutex_enter(&db->db_mtx);
3977 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3978 if (db->db_user == old_user)
3979 db->db_user = new_user;
3980 else
3981 old_user = db->db_user;
3982 dbuf_verify_user(db, DBVU_NOT_EVICTING);
3983 mutex_exit(&db->db_mtx);
3984
3985 return (old_user);
3986 }
3987
3988 void *
3989 dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3990 {
3991 return (dmu_buf_replace_user(db_fake, NULL, user));
3992 }
3993
3994 void *
3995 dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
3996 {
3997 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
3998
3999 db->db_user_immediate_evict = TRUE;
4000 return (dmu_buf_set_user(db_fake, user));
4001 }
4002
4003 void *
4004 dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
4005 {
4006 return (dmu_buf_replace_user(db_fake, user, NULL));
4007 }
4008
4009 void *
4010 dmu_buf_get_user(dmu_buf_t *db_fake)
4011 {
4012 dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
4013
4014 dbuf_verify_user(db, DBVU_NOT_EVICTING);
4015 return (db->db_user);
4016 }
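/*
 * Editorial sketch (not part of the original source): how a consumer
 * typically attaches private state to a dbuf with the user API above. It
 * assumes dmu_buf_init_user() from dmu.h, taken here to accept a
 * synchronous and an asynchronous eviction callback plus an optional
 * dmu_buf_t ** to clear on eviction; the struct and helper names are made
 * up. The dmu_buf_user_t must be the first member so the eviction
 * callback argument (and the pointer returned on a lost race) can be cast
 * back to the containing structure, which is the existing ZFS idiom.
 */
#if 0
typedef struct example_user {
	dmu_buf_user_t eu_dbu;		/* must be the first member */
	uint64_t eu_object;
} example_user_t;

static void
example_user_evict(void *arg)
{
	example_user_t *eu = arg;
	kmem_free(eu, sizeof (*eu));
}

static example_user_t *
example_attach_user(dmu_buf_t *db, uint64_t object)
{
	example_user_t *eu = kmem_zalloc(sizeof (*eu), KM_SLEEP);
	eu->eu_object = object;
	dmu_buf_init_user(&eu->eu_dbu, example_user_evict, NULL, NULL);
	example_user_t *winner = dmu_buf_set_user(db, &eu->eu_dbu);
	if (winner != NULL) {
		/* Another thread attached a user first; keep theirs. */
		kmem_free(eu, sizeof (*eu));
		return (winner);
	}
	return (eu);
}
#endif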
4017
4018 void
4019 dmu_buf_user_evict_wait(void)
4020 {
4021 taskq_wait(dbu_evict_taskq);
4022 }
4023
4024 blkptr_t *
4025 dmu_buf_get_blkptr(dmu_buf_t *db)
4026 {
4027 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4028 return (dbi->db_blkptr);
4029 }
4030
4031 objset_t *
4032 dmu_buf_get_objset(dmu_buf_t *db)
4033 {
4034 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4035 return (dbi->db_objset);
4036 }
4037
4038 dnode_t *
4039 dmu_buf_dnode_enter(dmu_buf_t *db)
4040 {
4041 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4042 DB_DNODE_ENTER(dbi);
4043 return (DB_DNODE(dbi));
4044 }
4045
4046 void
4047 dmu_buf_dnode_exit(dmu_buf_t *db)
4048 {
4049 dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
4050 DB_DNODE_EXIT(dbi);
4051 }
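/*
 * Editorial sketch (not part of the original source): the dnode returned
 * by dmu_buf_dnode_enter() may only be used between the enter/exit pair,
 * which pins the dnode handle so the dnode cannot be moved out from under
 * the caller. The helper name is illustrative.
 */
#if 0
static uint64_t
example_object_of(dmu_buf_t *db)
{
	dnode_t *dn = dmu_buf_dnode_enter(db);
	uint64_t obj = dn->dn_object;
	dmu_buf_dnode_exit(db);
	return (obj);
}
#endif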
4052
4053 static void
4054 dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
4055 {
4056 /* ASSERT(dmu_tx_is_syncing(tx)) */
4057 ASSERT(MUTEX_HELD(&db->db_mtx));
4058
4059 if (db->db_blkptr != NULL)
4060 return;
4061
4062 if (db->db_blkid == DMU_SPILL_BLKID) {
4063 db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
4064 BP_ZERO(db->db_blkptr);
4065 return;
4066 }
4067 if (db->db_level == dn->dn_phys->dn_nlevels-1) {
4068 /*
4069 * This buffer was allocated at a time when there were
4070 * no available blkptrs from the dnode, or it was
4071 * inappropriate to hook it in (i.e., nlevels mismatch).
4072 */
4073 ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
4074 ASSERT(db->db_parent == NULL);
4075 db->db_parent = dn->dn_dbuf;
4076 db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
4077 DBUF_VERIFY(db);
4078 } else {
4079 dmu_buf_impl_t *parent = db->db_parent;
4080 int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4081
4082 ASSERT(dn->dn_phys->dn_nlevels > 1);
4083 if (parent == NULL) {
4084 mutex_exit(&db->db_mtx);
4085 rw_enter(&dn->dn_struct_rwlock, RW_READER);
4086 parent = dbuf_hold_level(dn, db->db_level + 1,
4087 db->db_blkid >> epbs, db);
4088 rw_exit(&dn->dn_struct_rwlock);
4089 mutex_enter(&db->db_mtx);
4090 db->db_parent = parent;
4091 }
4092 db->db_blkptr = (blkptr_t *)parent->db.db_data +
4093 (db->db_blkid & ((1ULL << epbs) - 1));
4094 DBUF_VERIFY(db);
4095 }
4096 }
4097
4098 static void
4099 dbuf_sync_bonus(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4100 {
4101 dmu_buf_impl_t *db = dr->dr_dbuf;
4102 void *data = dr->dt.dl.dr_data;
4103
4104 ASSERT0(db->db_level);
4105 ASSERT(MUTEX_HELD(&db->db_mtx));
4106 ASSERT(db->db_blkid == DMU_BONUS_BLKID);
4107 ASSERT(data != NULL);
4108
4109 dnode_t *dn = dr->dr_dnode;
4110 ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
4111 DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
4112 memcpy(DN_BONUS(dn->dn_phys), data, DN_MAX_BONUS_LEN(dn->dn_phys));
4113
4114 dbuf_sync_leaf_verify_bonus_dnode(dr);
4115
4116 dbuf_undirty_bonus(dr);
4117 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4118 }
4119
4120 /*
4121 * When syncing out a block of dnodes, adjust the block to deal with
4122 * encryption. Normally, we make sure the block is decrypted before writing
4123 * it. If we have crypt params, then we are writing a raw (encrypted) block,
4124 * from a raw receive. In this case, set the ARC buf's crypt params so
4125 * that the BP will be filled with the correct byteorder, salt, iv, and mac.
4126 */
4127 static void
4128 dbuf_prepare_encrypted_dnode_leaf(dbuf_dirty_record_t *dr)
4129 {
4130 int err;
4131 dmu_buf_impl_t *db = dr->dr_dbuf;
4132
4133 ASSERT(MUTEX_HELD(&db->db_mtx));
4134 ASSERT3U(db->db.db_object, ==, DMU_META_DNODE_OBJECT);
4135 ASSERT3U(db->db_level, ==, 0);
4136
4137 if (!db->db_objset->os_raw_receive && arc_is_encrypted(db->db_buf)) {
4138 zbookmark_phys_t zb;
4139
4140 /*
4141 * Unfortunately, there is currently no mechanism for
4142 * syncing context to handle decryption errors. An error
4143 * here is only possible if an attacker maliciously
4144 * changed a dnode block and updated the associated
4145 * checksums going up the block tree.
4146 */
4147 SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
4148 db->db.db_object, db->db_level, db->db_blkid);
4149 err = arc_untransform(db->db_buf, db->db_objset->os_spa,
4150 &zb, B_TRUE);
4151 if (err)
4152 panic("Invalid dnode block MAC");
4153 } else if (dr->dt.dl.dr_has_raw_params) {
4154 (void) arc_release(dr->dt.dl.dr_data, db);
4155 arc_convert_to_raw(dr->dt.dl.dr_data,
4156 dmu_objset_id(db->db_objset),
4157 dr->dt.dl.dr_byteorder, DMU_OT_DNODE,
4158 dr->dt.dl.dr_salt, dr->dt.dl.dr_iv, dr->dt.dl.dr_mac);
4159 }
4160 }
4161
4162 /*
4163 * dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
4164 * is critical that we not allow the compiler to inline this function into
4165 * dbuf_sync_list(), thereby drastically bloating the stack usage.
4166 */
4167 noinline static void
4168 dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4169 {
4170 dmu_buf_impl_t *db = dr->dr_dbuf;
4171 dnode_t *dn = dr->dr_dnode;
4172
4173 ASSERT(dmu_tx_is_syncing(tx));
4174
4175 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4176
4177 mutex_enter(&db->db_mtx);
4178
4179 ASSERT(db->db_level > 0);
4180 DBUF_VERIFY(db);
4181
4182 /* Read the block if it hasn't been read yet. */
4183 if (db->db_buf == NULL) {
4184 mutex_exit(&db->db_mtx);
4185 (void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
4186 mutex_enter(&db->db_mtx);
4187 }
4188 ASSERT3U(db->db_state, ==, DB_CACHED);
4189 ASSERT(db->db_buf != NULL);
4190
4191 /* Indirect block size must match what the dnode thinks it is. */
4192 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4193 dbuf_check_blkptr(dn, db);
4194
4195 /* Provide the pending dirty record to child dbufs */
4196 db->db_data_pending = dr;
4197
4198 mutex_exit(&db->db_mtx);
4199
4200 dbuf_write(dr, db->db_buf, tx);
4201
4202 zio_t *zio = dr->dr_zio;
4203 mutex_enter(&dr->dt.di.dr_mtx);
4204 dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
4205 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4206 mutex_exit(&dr->dt.di.dr_mtx);
4207 zio_nowait(zio);
4208 }
4209
4210 /*
4211 * Verify that the size of the data in our bonus buffer does not exceed
4212 * its recorded size.
4213 *
4214 * The purpose of this verification is to catch any cases in development
4215 * where the size of a phys structure (i.e., space_map_phys_t) grows and,
4216 * due to incorrect feature management, older pools expect to read more
4217 * data even though they didn't actually write it to begin with.
4218 *
4219 * For example, this would catch an error in the feature logic where we
4220 * open an older pool and we expect to write the space map histogram of
4221 * a space map with size SPACE_MAP_SIZE_V0.
4222 */
4223 static void
4224 dbuf_sync_leaf_verify_bonus_dnode(dbuf_dirty_record_t *dr)
4225 {
4226 #ifdef ZFS_DEBUG
4227 dnode_t *dn = dr->dr_dnode;
4228
4229 /*
4230 * Encrypted bonus buffers can have data past their bonuslen.
4231 * Skip the verification of these blocks.
4232 */
4233 if (DMU_OT_IS_ENCRYPTED(dn->dn_bonustype))
4234 return;
4235
4236 uint16_t bonuslen = dn->dn_phys->dn_bonuslen;
4237 uint16_t maxbonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
4238 ASSERT3U(bonuslen, <=, maxbonuslen);
4239
4240 arc_buf_t *datap = dr->dt.dl.dr_data;
4241 char *datap_end = ((char *)datap) + bonuslen;
4242 char *datap_max = ((char *)datap) + maxbonuslen;
4243
4244 /* ensure that everything is zero after our data */
4245 for (; datap_end < datap_max; datap_end++)
4246 ASSERT(*datap_end == 0);
4247 #endif
4248 }
4249
4250 static blkptr_t *
4251 dbuf_lightweight_bp(dbuf_dirty_record_t *dr)
4252 {
4253 /* This must be a lightweight dirty record. */
4254 ASSERT3P(dr->dr_dbuf, ==, NULL);
4255 dnode_t *dn = dr->dr_dnode;
4256
4257 if (dn->dn_phys->dn_nlevels == 1) {
4258 VERIFY3U(dr->dt.dll.dr_blkid, <, dn->dn_phys->dn_nblkptr);
4259 return (&dn->dn_phys->dn_blkptr[dr->dt.dll.dr_blkid]);
4260 } else {
4261 dmu_buf_impl_t *parent_db = dr->dr_parent->dr_dbuf;
4262 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
4263 VERIFY3U(parent_db->db_level, ==, 1);
4264 VERIFY3P(parent_db->db_dnode_handle->dnh_dnode, ==, dn);
4265 VERIFY3U(dr->dt.dll.dr_blkid >> epbs, ==, parent_db->db_blkid);
4266 blkptr_t *bp = parent_db->db.db_data;
4267 return (&bp[dr->dt.dll.dr_blkid & ((1 << epbs) - 1)]);
4268 }
4269 }
4270
4271 static void
4272 dbuf_lightweight_ready(zio_t *zio)
4273 {
4274 dbuf_dirty_record_t *dr = zio->io_private;
4275 blkptr_t *bp = zio->io_bp;
4276
4277 if (zio->io_error != 0)
4278 return;
4279
4280 dnode_t *dn = dr->dr_dnode;
4281
4282 blkptr_t *bp_orig = dbuf_lightweight_bp(dr);
4283 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4284 int64_t delta = bp_get_dsize_sync(spa, bp) -
4285 bp_get_dsize_sync(spa, bp_orig);
4286 dnode_diduse_space(dn, delta);
4287
4288 uint64_t blkid = dr->dt.dll.dr_blkid;
4289 mutex_enter(&dn->dn_mtx);
4290 if (blkid > dn->dn_phys->dn_maxblkid) {
4291 ASSERT0(dn->dn_objset->os_raw_receive);
4292 dn->dn_phys->dn_maxblkid = blkid;
4293 }
4294 mutex_exit(&dn->dn_mtx);
4295
4296 if (!BP_IS_EMBEDDED(bp)) {
4297 uint64_t fill = BP_IS_HOLE(bp) ? 0 : 1;
4298 BP_SET_FILL(bp, fill);
4299 }
4300
4301 dmu_buf_impl_t *parent_db;
4302 EQUIV(dr->dr_parent == NULL, dn->dn_phys->dn_nlevels == 1);
4303 if (dr->dr_parent == NULL) {
4304 parent_db = dn->dn_dbuf;
4305 } else {
4306 parent_db = dr->dr_parent->dr_dbuf;
4307 }
4308 rw_enter(&parent_db->db_rwlock, RW_WRITER);
4309 *bp_orig = *bp;
4310 rw_exit(&parent_db->db_rwlock);
4311 }
4312
4313 static void
4314 dbuf_lightweight_physdone(zio_t *zio)
4315 {
4316 dbuf_dirty_record_t *dr = zio->io_private;
4317 dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
4318 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4319
4320 /*
4321 * The callback will be called io_phys_children times. Retire one
4322 * portion of our dirty space each time we are called. Any rounding
4323 * error will be cleaned up by dbuf_lightweight_done().
4324 */
4325 int delta = dr->dr_accounted / zio->io_phys_children;
4326 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4327 }
4328
4329 static void
4330 dbuf_lightweight_done(zio_t *zio)
4331 {
4332 dbuf_dirty_record_t *dr = zio->io_private;
4333
4334 VERIFY0(zio->io_error);
4335
4336 objset_t *os = dr->dr_dnode->dn_objset;
4337 dmu_tx_t *tx = os->os_synctx;
4338
4339 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4340 ASSERT(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
4341 } else {
4342 dsl_dataset_t *ds = os->os_dsl_dataset;
4343 (void) dsl_dataset_block_kill(ds, &zio->io_bp_orig, tx, B_TRUE);
4344 dsl_dataset_block_born(ds, zio->io_bp, tx);
4345 }
4346
4347 /*
4348 * See comment in dbuf_write_done().
4349 */
4350 if (zio->io_phys_children == 0) {
4351 dsl_pool_undirty_space(dmu_objset_pool(os),
4352 dr->dr_accounted, zio->io_txg);
4353 } else {
4354 dsl_pool_undirty_space(dmu_objset_pool(os),
4355 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4356 }
4357
4358 abd_free(dr->dt.dll.dr_abd);
4359 kmem_free(dr, sizeof (*dr));
4360 }
4361
4362 noinline static void
4363 dbuf_sync_lightweight(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4364 {
4365 dnode_t *dn = dr->dr_dnode;
4366 zio_t *pio;
4367 if (dn->dn_phys->dn_nlevels == 1) {
4368 pio = dn->dn_zio;
4369 } else {
4370 pio = dr->dr_parent->dr_zio;
4371 }
4372
4373 zbookmark_phys_t zb = {
4374 .zb_objset = dmu_objset_id(dn->dn_objset),
4375 .zb_object = dn->dn_object,
4376 .zb_level = 0,
4377 .zb_blkid = dr->dt.dll.dr_blkid,
4378 };
4379
4380 /*
4381 * See comment in dbuf_write(). This is so that zio->io_bp_orig
4382 * will have the old BP in dbuf_lightweight_done().
4383 */
4384 dr->dr_bp_copy = *dbuf_lightweight_bp(dr);
4385
4386 dr->dr_zio = zio_write(pio, dmu_objset_spa(dn->dn_objset),
4387 dmu_tx_get_txg(tx), &dr->dr_bp_copy, dr->dt.dll.dr_abd,
4388 dn->dn_datablksz, abd_get_size(dr->dt.dll.dr_abd),
4389 &dr->dt.dll.dr_props, dbuf_lightweight_ready, NULL,
4390 dbuf_lightweight_physdone, dbuf_lightweight_done, dr,
4391 ZIO_PRIORITY_ASYNC_WRITE,
4392 ZIO_FLAG_MUSTSUCCEED | dr->dt.dll.dr_flags, &zb);
4393
4394 zio_nowait(dr->dr_zio);
4395 }
4396
4397 /*
4398 * dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
4399 * critical that we not allow the compiler to inline this function into
4400 * dbuf_sync_list(), thereby drastically bloating the stack usage.
4401 */
4402 noinline static void
4403 dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
4404 {
4405 arc_buf_t **datap = &dr->dt.dl.dr_data;
4406 dmu_buf_impl_t *db = dr->dr_dbuf;
4407 dnode_t *dn = dr->dr_dnode;
4408 objset_t *os;
4409 uint64_t txg = tx->tx_txg;
4410
4411 ASSERT(dmu_tx_is_syncing(tx));
4412
4413 dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
4414
4415 mutex_enter(&db->db_mtx);
4416 /*
4417 * To be synced, we must be dirtied. But we
4418 * might have been freed after the dirty.
4419 */
4420 if (db->db_state == DB_UNCACHED) {
4421 /* This buffer has been freed since it was dirtied */
4422 ASSERT(db->db.db_data == NULL);
4423 } else if (db->db_state == DB_FILL) {
4424 /* This buffer was freed and is now being re-filled */
4425 ASSERT(db->db.db_data != dr->dt.dl.dr_data);
4426 } else {
4427 ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
4428 }
4429 DBUF_VERIFY(db);
4430
4431 if (db->db_blkid == DMU_SPILL_BLKID) {
4432 mutex_enter(&dn->dn_mtx);
4433 if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
4434 /*
4435 * In the previous transaction group, the bonus buffer
4436 * was entirely used to store the attributes for the
4437 * dnode which overrode the dn_spill field. However,
4438 * when adding more attributes to the file a spill
4439 * block was required to hold the extra attributes.
4440 *
4441 * Make sure to clear the garbage left in the dn_spill
4442 * field from the previous attributes in the bonus
4443 * buffer. Otherwise, after writing out the spill
4444 * block to the newly allocated dva, it will free
4445 * the old block pointed to by the invalid dn_spill.
4446 */
4447 db->db_blkptr = NULL;
4448 }
4449 dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
4450 mutex_exit(&dn->dn_mtx);
4451 }
4452
4453 /*
4454 * If this is a bonus buffer, simply copy the bonus data into the
4455 * dnode. It will be written out when the dnode is synced (and it
4456 * will be synced, since it must have been dirty for dbuf_sync to
4457 * be called).
4458 */
4459 if (db->db_blkid == DMU_BONUS_BLKID) {
4460 ASSERT(dr->dr_dbuf == db);
4461 dbuf_sync_bonus(dr, tx);
4462 return;
4463 }
4464
4465 os = dn->dn_objset;
4466
4467 /*
4468 * This function may have dropped the db_mtx lock allowing a dmu_sync
4469 * operation to sneak in. As a result, we need to ensure that we
4470 * don't check the dr_override_state until we have returned from
4471 * dbuf_check_blkptr.
4472 */
4473 dbuf_check_blkptr(dn, db);
4474
4475 /*
4476 * If this buffer is in the middle of an immediate write,
4477 * wait for the synchronous IO to complete.
4478 */
4479 while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
4480 ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
4481 cv_wait(&db->db_changed, &db->db_mtx);
4482 }
4483
4484 /*
4485 * If this is a dnode block, ensure it is appropriately encrypted
4486 * or decrypted, depending on what we are writing to it this txg.
4487 */
4488 if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
4489 dbuf_prepare_encrypted_dnode_leaf(dr);
4490
4491 if (db->db_state != DB_NOFILL &&
4492 dn->dn_object != DMU_META_DNODE_OBJECT &&
4493 zfs_refcount_count(&db->db_holds) > 1 &&
4494 dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
4495 *datap == db->db_buf) {
4496 /*
4497 * If this buffer is currently "in use" (i.e., there
4498 * are active holds and db_data still references it),
4499 * then make a copy before we start the write so that
4500 * any modifications from the open txg will not leak
4501 * into this write.
4502 *
4503 * NOTE: this copy does not need to be made for
4504 * objects only modified in the syncing context (e.g.
4505 * DNODE blocks).
4506 */
4507 int psize = arc_buf_size(*datap);
4508 int lsize = arc_buf_lsize(*datap);
4509 arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
4510 enum zio_compress compress_type = arc_get_compression(*datap);
4511 uint8_t complevel = arc_get_complevel(*datap);
4512
4513 if (arc_is_encrypted(*datap)) {
4514 boolean_t byteorder;
4515 uint8_t salt[ZIO_DATA_SALT_LEN];
4516 uint8_t iv[ZIO_DATA_IV_LEN];
4517 uint8_t mac[ZIO_DATA_MAC_LEN];
4518
4519 arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
4520 *datap = arc_alloc_raw_buf(os->os_spa, db,
4521 dmu_objset_id(os), byteorder, salt, iv, mac,
4522 dn->dn_type, psize, lsize, compress_type,
4523 complevel);
4524 } else if (compress_type != ZIO_COMPRESS_OFF) {
4525 ASSERT3U(type, ==, ARC_BUFC_DATA);
4526 *datap = arc_alloc_compressed_buf(os->os_spa, db,
4527 psize, lsize, compress_type, complevel);
4528 } else {
4529 *datap = arc_alloc_buf(os->os_spa, db, type, psize);
4530 }
4531 memcpy((*datap)->b_data, db->db.db_data, psize);
4532 }
4533 db->db_data_pending = dr;
4534
4535 mutex_exit(&db->db_mtx);
4536
4537 dbuf_write(dr, *datap, tx);
4538
4539 ASSERT(!list_link_active(&dr->dr_dirty_node));
4540 if (dn->dn_object == DMU_META_DNODE_OBJECT) {
4541 list_insert_tail(&dn->dn_dirty_records[txg & TXG_MASK], dr);
4542 } else {
4543 zio_nowait(dr->dr_zio);
4544 }
4545 }
4546
4547 void
4548 dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
4549 {
4550 dbuf_dirty_record_t *dr;
4551
4552 while ((dr = list_head(list))) {
4553 if (dr->dr_zio != NULL) {
4554 /*
4555 * If we find an already initialized zio then we
4556 * are processing the meta-dnode, and we have finished.
4557 * The dbufs for all dnodes are put back on the list
4558 * during processing, so that we can zio_wait()
4559 * these IOs after initiating all child IOs.
4560 */
4561 ASSERT3U(dr->dr_dbuf->db.db_object, ==,
4562 DMU_META_DNODE_OBJECT);
4563 break;
4564 }
4565 list_remove(list, dr);
4566 if (dr->dr_dbuf == NULL) {
4567 dbuf_sync_lightweight(dr, tx);
4568 } else {
4569 if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
4570 dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
4571 VERIFY3U(dr->dr_dbuf->db_level, ==, level);
4572 }
4573 if (dr->dr_dbuf->db_level > 0)
4574 dbuf_sync_indirect(dr, tx);
4575 else
4576 dbuf_sync_leaf(dr, tx);
4577 }
4578 }
4579 }
4580
4581 static void
4582 dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4583 {
4584 (void) buf;
4585 dmu_buf_impl_t *db = vdb;
4586 dnode_t *dn;
4587 blkptr_t *bp = zio->io_bp;
4588 blkptr_t *bp_orig = &zio->io_bp_orig;
4589 spa_t *spa = zio->io_spa;
4590 int64_t delta;
4591 uint64_t fill = 0;
4592 int i;
4593
4594 ASSERT3P(db->db_blkptr, !=, NULL);
4595 ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
4596
4597 DB_DNODE_ENTER(db);
4598 dn = DB_DNODE(db);
4599 delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
4600 dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
4601 zio->io_prev_space_delta = delta;
4602
4603 if (bp->blk_birth != 0) {
4604 ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
4605 BP_GET_TYPE(bp) == dn->dn_type) ||
4606 (db->db_blkid == DMU_SPILL_BLKID &&
4607 BP_GET_TYPE(bp) == dn->dn_bonustype) ||
4608 BP_IS_EMBEDDED(bp));
4609 ASSERT(BP_GET_LEVEL(bp) == db->db_level);
4610 }
4611
4612 mutex_enter(&db->db_mtx);
4613
4614 #ifdef ZFS_DEBUG
4615 if (db->db_blkid == DMU_SPILL_BLKID) {
4616 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4617 ASSERT(!(BP_IS_HOLE(bp)) &&
4618 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4619 }
4620 #endif
4621
4622 if (db->db_level == 0) {
4623 mutex_enter(&dn->dn_mtx);
4624 if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
4625 db->db_blkid != DMU_SPILL_BLKID) {
4626 ASSERT0(db->db_objset->os_raw_receive);
4627 dn->dn_phys->dn_maxblkid = db->db_blkid;
4628 }
4629 mutex_exit(&dn->dn_mtx);
4630
4631 if (dn->dn_type == DMU_OT_DNODE) {
4632 i = 0;
4633 while (i < db->db.db_size) {
4634 dnode_phys_t *dnp =
4635 (void *)(((char *)db->db.db_data) + i);
4636
4637 i += DNODE_MIN_SIZE;
4638 if (dnp->dn_type != DMU_OT_NONE) {
4639 fill++;
4640 i += dnp->dn_extra_slots *
4641 DNODE_MIN_SIZE;
4642 }
4643 }
4644 } else {
4645 if (BP_IS_HOLE(bp)) {
4646 fill = 0;
4647 } else {
4648 fill = 1;
4649 }
4650 }
4651 } else {
4652 blkptr_t *ibp = db->db.db_data;
4653 ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
4654 for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
4655 if (BP_IS_HOLE(ibp))
4656 continue;
4657 fill += BP_GET_FILL(ibp);
4658 }
4659 }
4660 DB_DNODE_EXIT(db);
4661
4662 if (!BP_IS_EMBEDDED(bp))
4663 BP_SET_FILL(bp, fill);
4664
4665 mutex_exit(&db->db_mtx);
4666
4667 db_lock_type_t dblt = dmu_buf_lock_parent(db, RW_WRITER, FTAG);
4668 *db->db_blkptr = *bp;
4669 dmu_buf_unlock_parent(db, dblt, FTAG);
4670 }
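/*
 * Editorial example (not part of the original source): a self-contained
 * model of the fill count computed above for a block of dnodes. Each slot
 * is DNODE_MIN_SIZE (512) bytes; an allocated dnode contributes one to
 * the fill count and consumes 1 + dn_extra_slots slots. The struct below
 * is a simplified stand-in for dnode_phys_t, used only to show the
 * stride; it is not the real on-disk layout.
 */
#if 0
#include <stdint.h>

#define	EX_DNODE_MIN_SIZE	512

typedef struct ex_dnode_phys {
	uint8_t dn_type;		/* 0 == DMU_OT_NONE (free slot) */
	uint8_t dn_extra_slots;		/* extra 512-byte slots consumed */
	uint8_t dn_pad[EX_DNODE_MIN_SIZE - 2];
} ex_dnode_phys_t;

static uint64_t
ex_dnode_block_fill(const char *data, uint64_t size)
{
	uint64_t fill = 0;

	for (uint64_t i = 0; i < size; ) {
		const ex_dnode_phys_t *dnp =
		    (const ex_dnode_phys_t *)(data + i);
		i += EX_DNODE_MIN_SIZE;
		if (dnp->dn_type != 0) {	/* allocated dnode */
			fill++;
			i += dnp->dn_extra_slots * EX_DNODE_MIN_SIZE;
		}
	}
	return (fill);
}
#endif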
4671
4672 /*
4673 * This function gets called just prior to running through the compression
4674 * stage of the zio pipeline. If we're an indirect block comprised of only
4675 * holes, then we want this indirect to be compressed away to a hole. In
4676 * order to do that we must zero out any information about the holes that
4677 * this indirect points to before we try to compress it.
4678 */
4679 static void
4680 dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
4681 {
4682 (void) zio, (void) buf;
4683 dmu_buf_impl_t *db = vdb;
4684 dnode_t *dn;
4685 blkptr_t *bp;
4686 unsigned int epbs, i;
4687
4688 ASSERT3U(db->db_level, >, 0);
4689 DB_DNODE_ENTER(db);
4690 dn = DB_DNODE(db);
4691 epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
4692 ASSERT3U(epbs, <, 31);
4693
4694 /* Determine if all our children are holes */
4695 for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
4696 if (!BP_IS_HOLE(bp))
4697 break;
4698 }
4699
4700 /*
4701 * If all the children are holes, then zero them all out so that
4702 * this indirect block may be compressed away to a hole.
4703 */
4704 if (i == 1ULL << epbs) {
4705 /*
4706 * We only found holes. Grab the rwlock to prevent
4707 * anybody from reading the blocks we're about to
4708 * zero out.
4709 */
4710 rw_enter(&db->db_rwlock, RW_WRITER);
4711 memset(db->db.db_data, 0, db->db.db_size);
4712 rw_exit(&db->db_rwlock);
4713 }
4714 DB_DNODE_EXIT(db);
4715 }
4716
4717 /*
4718 * The SPA will call this callback several times for each zio - once
4719 * for every physical child i/o (zio->io_phys_children times). This
4720 * allows the DMU to monitor the progress of each logical i/o. For example,
4721 * there may be 2 copies of an indirect block, or many fragments of a RAID-Z
4722 * block. There may be a long delay before all copies/fragments are completed,
4723 * so this callback allows us to retire dirty space gradually, as the physical
4724 * i/os complete.
4725 */
4726 static void
4727 dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
4728 {
4729 (void) buf;
4730 dmu_buf_impl_t *db = arg;
4731 objset_t *os = db->db_objset;
4732 dsl_pool_t *dp = dmu_objset_pool(os);
4733 dbuf_dirty_record_t *dr;
4734 int delta = 0;
4735
4736 dr = db->db_data_pending;
4737 ASSERT3U(dr->dr_txg, ==, zio->io_txg);
4738
4739 /*
4740 * The callback will be called io_phys_children times. Retire one
4741 * portion of our dirty space each time we are called. Any rounding
4742 * error will be cleaned up by dbuf_write_done().
4743 */
4744 delta = dr->dr_accounted / zio->io_phys_children;
4745 dsl_pool_undirty_space(dp, delta, zio->io_txg);
4746 }
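/*
 * Editorial example (not part of the original source): with
 * dr_accounted == 1000 bytes and io_phys_children == 3 (say, three copies
 * of a metadata block), each physdone callback above retires
 * 1000 / 3 == 333 bytes, for 999 in total; dbuf_write_done() then retires
 * the remaining 1000 % 3 == 1 byte. If no physical i/o was issued at all
 * (io_phys_children == 0), dbuf_write_done() retires the full 1000 bytes.
 */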
4747
4748 static void
4749 dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
4750 {
4751 (void) buf;
4752 dmu_buf_impl_t *db = vdb;
4753 blkptr_t *bp_orig = &zio->io_bp_orig;
4754 blkptr_t *bp = db->db_blkptr;
4755 objset_t *os = db->db_objset;
4756 dmu_tx_t *tx = os->os_synctx;
4757
4758 ASSERT0(zio->io_error);
4759 ASSERT(db->db_blkptr == bp);
4760
4761 /*
4762 * For nopwrites and rewrites we ensure that the bp matches our
4763 * original and bypass all the accounting.
4764 */
4765 if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
4766 ASSERT(BP_EQUAL(bp, bp_orig));
4767 } else {
4768 dsl_dataset_t *ds = os->os_dsl_dataset;
4769 (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
4770 dsl_dataset_block_born(ds, bp, tx);
4771 }
4772
4773 mutex_enter(&db->db_mtx);
4774
4775 DBUF_VERIFY(db);
4776
4777 dbuf_dirty_record_t *dr = db->db_data_pending;
4778 dnode_t *dn = dr->dr_dnode;
4779 ASSERT(!list_link_active(&dr->dr_dirty_node));
4780 ASSERT(dr->dr_dbuf == db);
4781 ASSERT(list_next(&db->db_dirty_records, dr) == NULL);
4782 list_remove(&db->db_dirty_records, dr);
4783
4784 #ifdef ZFS_DEBUG
4785 if (db->db_blkid == DMU_SPILL_BLKID) {
4786 ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
4787 ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
4788 db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
4789 }
4790 #endif
4791
4792 if (db->db_level == 0) {
4793 ASSERT(db->db_blkid != DMU_BONUS_BLKID);
4794 ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
4795 if (db->db_state != DB_NOFILL) {
4796 if (dr->dt.dl.dr_data != NULL &&
4797 dr->dt.dl.dr_data != db->db_buf) {
4798 arc_buf_destroy(dr->dt.dl.dr_data, db);
4799 }
4800 }
4801 } else {
4802 ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
4803 ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
4804 if (!BP_IS_HOLE(db->db_blkptr)) {
4805 int epbs __maybe_unused = dn->dn_phys->dn_indblkshift -
4806 SPA_BLKPTRSHIFT;
4807 ASSERT3U(db->db_blkid, <=,
4808 dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
4809 ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
4810 db->db.db_size);
4811 }
4812 mutex_destroy(&dr->dt.di.dr_mtx);
4813 list_destroy(&dr->dt.di.dr_children);
4814 }
4815
4816 cv_broadcast(&db->db_changed);
4817 ASSERT(db->db_dirtycnt > 0);
4818 db->db_dirtycnt -= 1;
4819 db->db_data_pending = NULL;
4820 dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg, B_FALSE);
4821
4822 /*
4823 * If we didn't do a physical write in this ZIO and we
4824 * still ended up here, it means that the space of the
4825 * dbuf that we just released (and undirtied) above hasn't
4826 * been marked as undirtied in the pool's accounting.
4827 *
4828 * Thus, we undirty that space in the pool's view of the
4829 * world here. For physical writes this type of update
4830 * happens in dbuf_write_physdone().
4831 *
4832 * If we did a physical write, cleanup any rounding errors
4833 * that came up due to writing multiple copies of a block
4834 * on disk [see dbuf_write_physdone()].
4835 */
4836 if (zio->io_phys_children == 0) {
4837 dsl_pool_undirty_space(dmu_objset_pool(os),
4838 dr->dr_accounted, zio->io_txg);
4839 } else {
4840 dsl_pool_undirty_space(dmu_objset_pool(os),
4841 dr->dr_accounted % zio->io_phys_children, zio->io_txg);
4842 }
4843
4844 kmem_free(dr, sizeof (dbuf_dirty_record_t));
4845 }
4846
4847 static void
4848 dbuf_write_nofill_ready(zio_t *zio)
4849 {
4850 dbuf_write_ready(zio, NULL, zio->io_private);
4851 }
4852
4853 static void
4854 dbuf_write_nofill_done(zio_t *zio)
4855 {
4856 dbuf_write_done(zio, NULL, zio->io_private);
4857 }
4858
4859 static void
4860 dbuf_write_override_ready(zio_t *zio)
4861 {
4862 dbuf_dirty_record_t *dr = zio->io_private;
4863 dmu_buf_impl_t *db = dr->dr_dbuf;
4864
4865 dbuf_write_ready(zio, NULL, db);
4866 }
4867
4868 static void
4869 dbuf_write_override_done(zio_t *zio)
4870 {
4871 dbuf_dirty_record_t *dr = zio->io_private;
4872 dmu_buf_impl_t *db = dr->dr_dbuf;
4873 blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
4874
4875 mutex_enter(&db->db_mtx);
4876 if (!BP_EQUAL(zio->io_bp, obp)) {
4877 if (!BP_IS_HOLE(obp))
4878 dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
4879 arc_release(dr->dt.dl.dr_data, db);
4880 }
4881 mutex_exit(&db->db_mtx);
4882
4883 dbuf_write_done(zio, NULL, db);
4884
4885 if (zio->io_abd != NULL)
4886 abd_free(zio->io_abd);
4887 }
4888
4889 typedef struct dbuf_remap_impl_callback_arg {
4890 objset_t *drica_os;
4891 uint64_t drica_blk_birth;
4892 dmu_tx_t *drica_tx;
4893 } dbuf_remap_impl_callback_arg_t;
4894
4895 static void
4896 dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
4897 void *arg)
4898 {
4899 dbuf_remap_impl_callback_arg_t *drica = arg;
4900 objset_t *os = drica->drica_os;
4901 spa_t *spa = dmu_objset_spa(os);
4902 dmu_tx_t *tx = drica->drica_tx;
4903
4904 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4905
4906 if (os == spa_meta_objset(spa)) {
4907 spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
4908 } else {
4909 dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
4910 size, drica->drica_blk_birth, tx);
4911 }
4912 }
4913
4914 static void
4915 dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, krwlock_t *rw, dmu_tx_t *tx)
4916 {
4917 blkptr_t bp_copy = *bp;
4918 spa_t *spa = dmu_objset_spa(dn->dn_objset);
4919 dbuf_remap_impl_callback_arg_t drica;
4920
4921 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4922
4923 drica.drica_os = dn->dn_objset;
4924 drica.drica_blk_birth = bp->blk_birth;
4925 drica.drica_tx = tx;
4926 if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
4927 &drica)) {
4928 /*
4929 * If the blkptr being remapped is tracked by a livelist,
4930 * then we need to make sure the livelist reflects the update.
4931 * First, cancel out the old blkptr by appending a 'FREE'
4932 * entry. Next, add an 'ALLOC' to track the new version. This
4933 * way we avoid trying to free an inaccurate blkptr at delete.
4934 * Note that embedded blkptrs are not tracked in livelists.
4935 */
4936 if (dn->dn_objset != spa_meta_objset(spa)) {
4937 dsl_dataset_t *ds = dmu_objset_ds(dn->dn_objset);
4938 if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
4939 bp->blk_birth > ds->ds_dir->dd_origin_txg) {
4940 ASSERT(!BP_IS_EMBEDDED(bp));
4941 ASSERT(dsl_dir_is_clone(ds->ds_dir));
4942 ASSERT(spa_feature_is_enabled(spa,
4943 SPA_FEATURE_LIVELIST));
4944 bplist_append(&ds->ds_dir->dd_pending_frees,
4945 bp);
4946 bplist_append(&ds->ds_dir->dd_pending_allocs,
4947 &bp_copy);
4948 }
4949 }
4950
4951 /*
4952 * The db_rwlock prevents dbuf_read_impl() from
4953 * dereferencing the BP while we are changing it. To
4954 * avoid lock contention, only grab it when we are actually
4955 * changing the BP.
4956 */
4957 if (rw != NULL)
4958 rw_enter(rw, RW_WRITER);
4959 *bp = bp_copy;
4960 if (rw != NULL)
4961 rw_exit(rw);
4962 }
4963 }
4964
4965 /*
4966 * Remap any existing BP's to concrete vdevs, if possible.
4967 */
4968 static void
4969 dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
4970 {
4971 spa_t *spa = dmu_objset_spa(db->db_objset);
4972 ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
4973
4974 if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
4975 return;
4976
4977 if (db->db_level > 0) {
4978 blkptr_t *bp = db->db.db_data;
4979 for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
4980 dbuf_remap_impl(dn, &bp[i], &db->db_rwlock, tx);
4981 }
4982 } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
4983 dnode_phys_t *dnp = db->db.db_data;
4984 ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
4985 DMU_OT_DNODE);
4986 for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
4987 i += dnp[i].dn_extra_slots + 1) {
4988 for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
4989 krwlock_t *lock = (dn->dn_dbuf == NULL ? NULL :
4990 &dn->dn_dbuf->db_rwlock);
4991 dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], lock,
4992 tx);
4993 }
4994 }
4995 }
4996 }
4997
4998
4999 /* Issue I/O to commit a dirty buffer to disk. */
5000 static void
5001 dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
5002 {
5003 dmu_buf_impl_t *db = dr->dr_dbuf;
5004 dnode_t *dn = dr->dr_dnode;
5005 objset_t *os;
5006 dmu_buf_impl_t *parent = db->db_parent;
5007 uint64_t txg = tx->tx_txg;
5008 zbookmark_phys_t zb;
5009 zio_prop_t zp;
5010 zio_t *pio; /* parent I/O */
5011 int wp_flag = 0;
5012
5013 ASSERT(dmu_tx_is_syncing(tx));
5014
5015 os = dn->dn_objset;
5016
5017 if (db->db_state != DB_NOFILL) {
5018 if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
5019 /*
5020 * Private object buffers are released here rather
5021 * than in dbuf_dirty() since they are only modified
5022 * in the syncing context and we don't want the
5023 * overhead of making multiple copies of the data.
5024 */
5025 if (BP_IS_HOLE(db->db_blkptr)) {
5026 arc_buf_thaw(data);
5027 } else {
5028 dbuf_release_bp(db);
5029 }
5030 dbuf_remap(dn, db, tx);
5031 }
5032 }
5033
5034 if (parent != dn->dn_dbuf) {
5035 /* Our parent is an indirect block. */
5036 /* We have a dirty parent that has been scheduled for write. */
5037 ASSERT(parent && parent->db_data_pending);
5038 /* Our parent's buffer is one level closer to the dnode. */
5039 ASSERT(db->db_level == parent->db_level-1);
5040 /*
5041 * We're about to modify our parent's db_data by modifying
5042 * our block pointer, so the parent must be released.
5043 */
5044 ASSERT(arc_released(parent->db_buf));
5045 pio = parent->db_data_pending->dr_zio;
5046 } else {
5047 /* Our parent is the dnode itself. */
5048 ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
5049 db->db_blkid != DMU_SPILL_BLKID) ||
5050 (db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
5051 if (db->db_blkid != DMU_SPILL_BLKID)
5052 ASSERT3P(db->db_blkptr, ==,
5053 &dn->dn_phys->dn_blkptr[db->db_blkid]);
5054 pio = dn->dn_zio;
5055 }
5056
5057 ASSERT(db->db_level == 0 || data == db->db_buf);
5058 ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
5059 ASSERT(pio);
5060
5061 SET_BOOKMARK(&zb, os->os_dsl_dataset ?
5062 os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
5063 db->db.db_object, db->db_level, db->db_blkid);
5064
5065 if (db->db_blkid == DMU_SPILL_BLKID)
5066 wp_flag = WP_SPILL;
5067 wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
5068
5069 dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
5070
5071 /*
5072 * We copy the blkptr now (rather than when we instantiate the dirty
5073 * record), because its value can change between open context and
5074 * syncing context. We do not need to hold dn_struct_rwlock to read
5075 * db_blkptr because we are in syncing context.
5076 */
5077 dr->dr_bp_copy = *db->db_blkptr;
5078
5079 if (db->db_level == 0 &&
5080 dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
5081 /*
5082 * The BP for this block has been provided by open context
5083 * (by dmu_sync() or dmu_buf_write_embedded()).
5084 */
5085 abd_t *contents = (data != NULL) ?
5086 abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
5087
5088 dr->dr_zio = zio_write(pio, os->os_spa, txg, &dr->dr_bp_copy,
5089 contents, db->db.db_size, db->db.db_size, &zp,
5090 dbuf_write_override_ready, NULL, NULL,
5091 dbuf_write_override_done,
5092 dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
5093 mutex_enter(&db->db_mtx);
5094 dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
5095 zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
5096 dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite,
5097 dr->dt.dl.dr_brtwrite);
5098 mutex_exit(&db->db_mtx);
5099 } else if (db->db_state == DB_NOFILL) {
5100 ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
5101 zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
5102 dr->dr_zio = zio_write(pio, os->os_spa, txg,
5103 &dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
5104 dbuf_write_nofill_ready, NULL, NULL,
5105 dbuf_write_nofill_done, db,
5106 ZIO_PRIORITY_ASYNC_WRITE,
5107 ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
5108 } else {
5109 ASSERT(arc_released(data));
5110
5111 /*
5112 * For indirect blocks, we want to setup the children
5113 * ready callback so that we can properly handle an indirect
5114 * block that only contains holes.
5115 */
5116 arc_write_done_func_t *children_ready_cb = NULL;
5117 if (db->db_level != 0)
5118 children_ready_cb = dbuf_write_children_ready;
5119
5120 dr->dr_zio = arc_write(pio, os->os_spa, txg,
5121 &dr->dr_bp_copy, data, !DBUF_IS_CACHEABLE(db),
5122 dbuf_is_l2cacheable(db), &zp, dbuf_write_ready,
5123 children_ready_cb, dbuf_write_physdone,
5124 dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
5125 ZIO_FLAG_MUSTSUCCEED, &zb);
5126 }
5127 }
5128
5129 EXPORT_SYMBOL(dbuf_find);
5130 EXPORT_SYMBOL(dbuf_is_metadata);
5131 EXPORT_SYMBOL(dbuf_destroy);
5132 EXPORT_SYMBOL(dbuf_loan_arcbuf);
5133 EXPORT_SYMBOL(dbuf_whichblock);
5134 EXPORT_SYMBOL(dbuf_read);
5135 EXPORT_SYMBOL(dbuf_unoverride);
5136 EXPORT_SYMBOL(dbuf_free_range);
5137 EXPORT_SYMBOL(dbuf_new_size);
5138 EXPORT_SYMBOL(dbuf_release_bp);
5139 EXPORT_SYMBOL(dbuf_dirty);
5140 EXPORT_SYMBOL(dmu_buf_set_crypt_params);
5141 EXPORT_SYMBOL(dmu_buf_will_dirty);
5142 EXPORT_SYMBOL(dmu_buf_is_dirty);
5143 EXPORT_SYMBOL(dmu_buf_will_not_fill);
5144 EXPORT_SYMBOL(dmu_buf_will_fill);
5145 EXPORT_SYMBOL(dmu_buf_fill_done);
5146 EXPORT_SYMBOL(dmu_buf_rele);
5147 EXPORT_SYMBOL(dbuf_assign_arcbuf);
5148 EXPORT_SYMBOL(dbuf_prefetch);
5149 EXPORT_SYMBOL(dbuf_hold_impl);
5150 EXPORT_SYMBOL(dbuf_hold);
5151 EXPORT_SYMBOL(dbuf_hold_level);
5152 EXPORT_SYMBOL(dbuf_create_bonus);
5153 EXPORT_SYMBOL(dbuf_spill_set_blksz);
5154 EXPORT_SYMBOL(dbuf_rm_spill);
5155 EXPORT_SYMBOL(dbuf_add_ref);
5156 EXPORT_SYMBOL(dbuf_rele);
5157 EXPORT_SYMBOL(dbuf_rele_and_unlock);
5158 EXPORT_SYMBOL(dbuf_refcount);
5159 EXPORT_SYMBOL(dbuf_sync_list);
5160 EXPORT_SYMBOL(dmu_buf_set_user);
5161 EXPORT_SYMBOL(dmu_buf_set_user_ie);
5162 EXPORT_SYMBOL(dmu_buf_get_user);
5163 EXPORT_SYMBOL(dmu_buf_get_blkptr);
5164
5165 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, max_bytes, U64, ZMOD_RW,
5166 "Maximum size in bytes of the dbuf cache.");
5167
5168 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, hiwater_pct, UINT, ZMOD_RW,
5169 "Percentage over dbuf_cache_max_bytes for direct dbuf eviction.");
5170
5171 ZFS_MODULE_PARAM(zfs_dbuf_cache, dbuf_cache_, lowater_pct, UINT, ZMOD_RW,
5172 "Percentage below dbuf_cache_max_bytes when dbuf eviction stops.");
5173
5174 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_max_bytes, U64, ZMOD_RW,
5175 "Maximum size in bytes of dbuf metadata cache.");
5176
5177 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, cache_shift, UINT, ZMOD_RW,
5178 "Set size of dbuf cache to log2 fraction of arc size.");
5179
5180 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, metadata_cache_shift, UINT, ZMOD_RW,
5181 "Set size of dbuf metadata cache to log2 fraction of arc size.");
5182
5183 ZFS_MODULE_PARAM(zfs_dbuf, dbuf_, mutex_cache_shift, UINT, ZMOD_RD,
5184 "Set size of dbuf cache mutex array as log2 shift.");